from typing import TYPE_CHECKING

from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable


_import_structure = {"configuration_dpt": ["DPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DPTConfig"]}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_dpt"] = ["DPTFeatureExtractor"]
    _import_structure["image_processing_dpt"] = ["DPTImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_dpt"] = [
        "DPT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DPTForDepthEstimation",
        "DPTForSemanticSegmentation",
        "DPTModel",
        "DPTPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_dpt import DPTFeatureExtractor
        from .image_processing_dpt import DPTImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_dpt import (
            DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
            DPTForDepthEstimation,
            DPTForSemanticSegmentation,
            DPTModel,
            DPTPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
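# Usage sketch for the lazy-import pattern above, assuming this file is the DPT
# package __init__ (transformers/models/dpt/__init__.py). The deferred loading is
# the general _LazyModule contract; shown as comments since it runs outside this file.
#
#     from transformers.models.dpt import DPTConfig  # triggers only configuration_dpt
#     from transformers.models.dpt import DPTModel   # modeling_dpt is imported here,
#                                                    # so torch is required only now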
import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput
class IPNDMScheduler(SchedulerMixin, ConfigMixin):
    """A fourth-order Improved Pseudo Linear Multistep (iPNDM) scheduler."""

    order = 1

    @register_to_config
    def __init__(
        self, num_train_timesteps: int = 1000, trained_betas: Optional[Union[np.ndarray, List[float]]] = None
    ):
        # set `betas`, `alphas`, `timesteps`
        self.set_timesteps(num_train_timesteps)

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # For now we only support F-PNDM, i.e. the runge-kutta method
        # For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
        # mainly at formula (9), (12), (13) and the Algorithm 2.
        self.pndm_order = 4

        # running values
        self.ets = []

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        self.num_inference_steps = num_inference_steps
        steps = torch.linspace(1, 0, num_inference_steps + 1)[:-1]
        steps = torch.cat([steps, torch.tensor([0.0])])

        if self.config.trained_betas is not None:
            self.betas = torch.tensor(self.config.trained_betas, dtype=torch.float32)
        else:
            self.betas = torch.sin(steps * math.pi / 2) ** 2

        self.alphas = (1.0 - self.betas**2) ** 0.5

        timesteps = (torch.atan2(self.betas, self.alphas) / math.pi * 2)[:-1]
        self.timesteps = timesteps.to(device)

        self.ets = []

    def step(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        return_dict: bool = True,
    ) -> Union[SchedulerOutput, Tuple]:
        if self.num_inference_steps is None:
            raise ValueError(
                "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
            )

        timestep_index = (self.timesteps == timestep).nonzero().item()
        prev_timestep_index = timestep_index + 1

        ets = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
        self.ets.append(ets)

        # linear multistep (Adams-Bashforth style) combination of the running values
        if len(self.ets) == 1:
            ets = self.ets[-1]
        elif len(self.ets) == 2:
            ets = (3 * self.ets[-1] - self.ets[-2]) / 2
        elif len(self.ets) == 3:
            ets = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
        else:
            ets = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])

        prev_sample = self._get_prev_sample(sample, timestep_index, prev_timestep_index, ets)

        if not return_dict:
            return (prev_sample,)
        return SchedulerOutput(prev_sample=prev_sample)

    def scale_model_input(self, sample: torch.FloatTensor, *args, **kwargs) -> torch.FloatTensor:
        # this scheduler does not rescale model inputs
        return sample

    def _get_prev_sample(self, sample, timestep_index, prev_timestep_index, ets) -> torch.FloatTensor:
        alpha = self.alphas[timestep_index]
        sigma = self.betas[timestep_index]

        next_alpha = self.alphas[prev_timestep_index]
        next_sigma = self.betas[prev_timestep_index]

        pred = (sample - sigma * ets) / max(alpha, 1e-8)
        prev_sample = next_alpha * pred + ets * next_sigma
        return prev_sample

    def __len__(self) -> int:
        return self.config.num_train_timesteps
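# Minimal denoising-loop sketch for the scheduler above. The zero-returning `model`
# is a stand-in for a real noise-prediction network and is not part of this module.
if __name__ == "__main__":
    scheduler = IPNDMScheduler()
    scheduler.set_timesteps(num_inference_steps=50)

    def model(x, t):
        # a real UNet would predict the noise residual here
        return torch.zeros_like(x)

    sample = torch.randn(1, 3, 64, 64) * scheduler.init_noise_sigma
    for t in scheduler.timesteps:
        sample = scheduler.step(model(sample, t), t, sample).prev_sample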
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import DDIMScheduler, KandinskyV22Pipeline, KandinskyV22PriorPipeline, UNet2DConditionModel, VQModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyV22PipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22Pipeline
    params = [
        "image_embeds",
        "negative_image_embeds",
    ]
    batch_params = ["image_embeds", "negative_image_embeds"]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "num_images_per_prompt",
        "output_type",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 4,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model

    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model

    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq

        scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_schedule="linear",
            beta_start=0.00085,
            beta_end=0.012,
            clip_sample=False,
            set_alpha_to_one=False,
            steps_offset=1,
            prediction_type="epsilon",
            thresholding=False,
        )

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "guidance_scale": 4.0,
            "num_inference_steps": 2,
            "output_type": "np",
        }
        return inputs

    def test_kandinsky(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.6237976, 1.0, 0.36441332, 1.0, 0.70639634, 0.29877186, 0.85652125, 0.5216843, 0.54454046]
        )

        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"


@slow
@require_torch_gpu
class KandinskyV22PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_text2img(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_text2img_cat_fp16.npy"
        )

        pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyV22Pipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        prompt = "red cat, 4k photo"

        generator = torch.Generator(device="cuda").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt="",
        ).to_tuple()

        generator = torch.Generator(device="cuda").manual_seed(0)
        output = pipeline(
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (512, 512, 3)

        assert_mean_pixel_difference(image, expected_image)
def hubble_parameter(
    hubble_constant: float,
    radiation_density: float,
    matter_density: float,
    dark_energy: float,
    redshift: float,
) -> float:
    """
    Calculate the Hubble parameter H(z) in km/s/Mpc for the given relative
    densities and redshift.

    >>> hubble_parameter(hubble_constant=68.3, radiation_density=1e-4,
    ... matter_density=0.3, dark_energy=0.7, redshift=0)
    68.3
    """
    parameters = [redshift, radiation_density, matter_density, dark_energy]
    if any(p < 0 for p in parameters):
        raise ValueError("All input parameters must be positive")

    if any(p > 1 for p in parameters[1:4]):
        raise ValueError("Relative densities cannot be greater than one")
    else:
        curvature = 1 - (matter_density + radiation_density + dark_energy)

        e_2 = (
            radiation_density * (redshift + 1) ** 4
            + matter_density * (redshift + 1) ** 3
            + curvature * (redshift + 1) ** 2
            + dark_energy
        )

        hubble = hubble_constant * e_2 ** (1 / 2)
        return hubble
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# demo LCDM approximation
matter_density = 0.3
print(
hubble_parameter(
hubble_constant=68.3,
radiation_density=1E-4,
matter_density=matter_density,
dark_energy=1 - matter_density,
redshift=0,
)
)
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
def make_lowpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a second-order low-pass biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 - _cos) / 2
    b1 = 1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def make_highpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a second-order high-pass biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 + _cos) / 2
    b1 = -1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def make_bandpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a second-order band-pass biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = _sin / 2
    b1 = 0
    b2 = -b0

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_allpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a second-order all-pass biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = 1 - alpha
    b1 = -2 * _cos
    b2 = 1 + alpha

    filt = IIRFilter(2)
    filt.set_coefficients([b2, b1, b0], [b0, b1, b2])
    return filt


def make_peak(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a second-order peaking-EQ biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)

    b0 = 1 + alpha * big_a
    b1 = -2 * _cos
    b2 = 1 - alpha * big_a
    a0 = 1 + alpha / big_a
    a1 = -2 * _cos
    a2 = 1 - alpha / big_a

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_lowshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a second-order low-shelf biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha

    b0 = big_a * (pmc + aa2)
    b1 = 2 * big_a * mpc
    b2 = big_a * (pmc - aa2)
    a0 = ppmc + aa2
    a1 = -2 * pmpc
    a2 = ppmc - aa2

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_highshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a second-order high-shelf biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha

    b0 = big_a * (ppmc + aa2)
    b1 = -2 * big_a * pmpc
    b2 = big_a * (ppmc - aa2)
    a0 = pmc + aa2
    a1 = 2 * mpc
    a2 = pmc - aa2

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
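# Usage sketch: a 1 kHz low-pass biquad for 48 kHz audio, fed one sample at a time.
# IIRFilter.process(sample) is assumed from audio_filters.iir_filter in this repository
# (one float in, one float out).
if __name__ == "__main__":
    samplerate = 48000
    filt = make_lowpass(1000, samplerate)
    tone = [sin(tau * 5000 * n / samplerate) for n in range(480)]  # 10 ms of a 5 kHz tone
    filtered = [filt.process(sample) for sample in tone]  # 5 kHz lies well above the cutoff
    print(max(abs(s) for s in filtered))  # noticeably below the input amplitude of 1.0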
import sys
def matrix_chain_order(array):
    """Compute the minimum multiplication cost table and split points (O(n^3) DP)."""
    n = len(array)
    matrix = [[0 for x in range(n)] for x in range(n)]
    sol = [[0 for x in range(n)] for x in range(n)]

    for chain_length in range(2, n):
        for a in range(1, n - chain_length + 1):
            b = a + chain_length - 1

            matrix[a][b] = sys.maxsize
            for c in range(a, b):
                cost = (
                    matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
                )
                if cost < matrix[a][b]:
                    matrix[a][b] = cost
                    sol[a][b] = c
    return matrix, sol


def print_optimal_solution(optimal_solution, i, j):
    """Recursively print the optimal parenthesization, with A1..An as the matrices."""
    if i == j:
        print("A" + str(i), end=" ")
    else:
        print("(", end=" ")
        print_optimal_solution(optimal_solution, i, optimal_solution[i][j])
        print_optimal_solution(optimal_solution, optimal_solution[i][j] + 1, j)
        print(")", end=" ")


def main():
    array = [30, 35, 15, 5, 10, 20, 25]
    n = len(array)
    # Size of matrix created from above array will be
    # 30*35 35*15 15*5 5*10 10*20 20*25
    matrix, optimal_solution = matrix_chain_order(array)

    print("No. of operations required: " + str(matrix[1][n - 1]))
    print_optimal_solution(optimal_solution, 1, n - 1)


if __name__ == "__main__":
    main()
import os
import unittest
from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class TransfoXLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = TransfoXLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "<unk>",
            "[CLS]",
            "[SEP]",
            "want",
            "unwanted",
            "wa",
            "un",
            "running",
            ",",
            "low",
            "l",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        kwargs["lower_case"] = True
        return TransfoXLTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "<unk> UNwanted , running"
        output_text = "<unk> unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = TransfoXLTokenizer(vocab_file=self.vocab_file, lower_case=True)

        tokens = tokenizer.tokenize("<unk> UNwanted , running")
        self.assertListEqual(tokens, ["<unk>", "unwanted", ",", "running"])

        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [0, 4, 8, 7])

    def test_full_tokenizer_lower(self):
        tokenizer = TransfoXLTokenizer(lower_case=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo ! how \n Are yoU ? "), ["hello", "!", "how", "are", "you", "?"]
        )

    def test_full_tokenizer_no_lower(self):
        tokenizer = TransfoXLTokenizer(lower_case=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo ! how \n Are yoU ? "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_full_tokenizer_moses_numbers(self):
        tokenizer = TransfoXLTokenizer(lower_case=False)
        text_in = "Hello (bracket) and side-scrolled [and] Henry's $5,000 with 3.34 m. What's up!?"
        tokens_out = [
            "Hello",
            "(",
            "bracket",
            ")",
            "and",
            "side",
            "@-@",
            "scrolled",
            "[",
            "and",
            "]",
            "Henry",
            "'s",
            "$",
            "5",
            "@,@",
            "000",
            "with",
            "3",
            "@.@",
            "34",
            "m",
            ".",
            "What",
            "'s",
            "up",
            "!",
            "?",
        ]

        self.assertListEqual(tokenizer.tokenize(text_in), tokens_out)

        self.assertEqual(tokenizer.convert_tokens_to_string(tokens_out), text_in)

    def test_move_added_token(self):
        tokenizer = self.get_tokenizer()
        original_len = len(tokenizer)

        tokenizer.add_tokens(["new1", "new2"])
        tokenizer.move_added_token("new1", 1)

        # Check that moved token is not copied (duplicate)
        self.assertEqual(len(tokenizer), original_len + 2)
        # Check that token is moved to specified id
        self.assertEqual(tokenizer.encode("new1"), [1])
        self.assertEqual(tokenizer.decode([1]), "new1")
def equation(x: float) -> float:
    return 10 - x * x


def bisection(a: float, b: float) -> float:
    # Bolzano's theorem: a root can only be bracketed if the signs at a and b differ
    if equation(a) * equation(b) >= 0:
        raise ValueError("Wrong space!")

    c = a
    while (b - a) >= 0.01:
        # Find middle point
        c = (a + b) / 2
        # Check if middle point is root
        if equation(c) == 0.0:
            break
        # Decide the side to repeat the steps
        if equation(c) * equation(a) < 0:
            b = c
        else:
            a = c
    return c
if __name__ == "__main__":
import doctest
doctest.testmod()
print(bisection(-2, 5))
print(bisection(0, 6))
import argparse
import collections
import numpy as np
import torch
from flax import traverse_util
from t5x import checkpoints
from transformers import MT5Config, UMT5EncoderModel, UMT5ForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def t5x_relpos_bias_lookup(params, i, prefix):
    """Returns the Relative Position Bias parameters of a layer. Does not transpose."""
    return params[f"{prefix}/{prefix}/relpos_bias/rel_embedding"][:, i, :]


def t5x_attention_lookup(params, i, prefix, layer_name="attention"):
    """Returns the KOQV parameters of (self-)attention. Does not transpose."""
    k_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/key/kernel"][:, i, :, :])
    k = k_tmp.reshape(k_tmp.shape[0], k_tmp.shape[1] * k_tmp.shape[2])
    o_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/out/kernel"][:, i, :, :])
    o = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1], o_tmp.shape[2])
    q_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/query/kernel"][:, i, :, :])
    q = q_tmp.reshape(q_tmp.shape[0], q_tmp.shape[1] * q_tmp.shape[2])
    v_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/value/kernel"][:, i, :, :])
    v = v_tmp.reshape(v_tmp.shape[0], v_tmp.shape[1] * v_tmp.shape[2])
    return k, o, q, v


def t5x_mlp_lookup(params, i, prefix, split_mlp_wi=False):
    """Returns the MLP parameters of a layer. Does not transpose."""
    if split_mlp_wi:
        wi_0 = params[f"{prefix}/{prefix}/mlp/wi_0/kernel"][:, i, :]
        wi_1 = params[f"{prefix}/{prefix}/mlp/wi_1/kernel"][:, i, :]
        wi = (wi_0, wi_1)
    else:
        wi = params[f"{prefix}/{prefix}/mlp/wi/kernel"][:, i, :]

    wo = params[f"{prefix}/{prefix}/mlp/wo/kernel"][:, i, :]
    return wi, wo


def t5x_layer_norm_lookup(params, i, prefix, layer_name):
    """Returns the layer norm param of a layer."""
    return params[f"{prefix}/{prefix}/{layer_name}/scale"][:, i]


def convert_t5x_to_pytorch(
    variables: dict, *, num_layers: int, is_encoder_only: bool, scalable_attention: bool = False
):
    """Converts the parameters from T5X-Flax to Transformers-PyTorch."""
    old = traverse_util.flatten_dict(variables["target"])
    old = {"/".join(k): v for k, v in old.items()}

    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    split_mlp_wi = "encoder/encoder/mlp/wi_0/kernel" in old
    print("Split MLP:", split_mlp_wi)

    new = collections.OrderedDict()

    # Shared embeddings.
    new["shared.weight"] = old["token_embedder/embedding"]

    # Encoder.
    for i in range(num_layers):
        # Block i, layer 0 (Self Attention).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_attention_layer_norm")
        k, o, q, v = t5x_attention_lookup(old, i, "encoder", "attention")
        new[f"encoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
        new[f"encoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

        # Block i, layer 1 (MLP).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_mlp_layer_norm")
        wi, wo = t5x_mlp_lookup(old, i, "encoder", split_mlp_wi)
        new[f"encoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
        if split_mlp_wi:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_0.weight"] = wi[0].T
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_1.weight"] = wi[1].T
        else:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi.weight"] = wi.T
        new[f"encoder.block.{i}.layer.1.DenseReluDense.wo.weight"] = wo.T
        if scalable_attention:
            # convert the rel_embedding of each layer
            new[f"encoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
                old, i, "encoder"
            ).T

    new["encoder.final_layer_norm.weight"] = old["encoder/encoder_norm/scale"]

    if not scalable_attention:
        new["encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
            old, 0, "encoder"
        ).T
        new["decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
            old, 0, "decoder"
        ).T

    if not is_encoder_only:
        # Decoder.
        for i in range(num_layers):
            # Block i, layer 0 (Self Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_self_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "self_attention")
            new[f"decoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

            # Block i, layer 1 (Cross Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_cross_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "encoder_decoder_attention")
            new[f"decoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.1.EncDecAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.v.weight"] = v.T

            # Block i, layer 2 (MLP).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_mlp_layer_norm")
            wi, wo = t5x_mlp_lookup(old, i, "decoder", split_mlp_wi)
            new[f"decoder.block.{i}.layer.2.layer_norm.weight"] = layer_norm
            if split_mlp_wi:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_0.weight"] = wi[0].T
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_1.weight"] = wi[1].T
            else:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi.weight"] = wi.T
            new[f"decoder.block.{i}.layer.2.DenseReluDense.wo.weight"] = wo.T

            if scalable_attention:
                # convert the rel_embedding of each layer
                new[
                    f"decoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight"
                ] = t5x_relpos_bias_lookup(old, i, "decoder").T

        new["decoder.final_layer_norm.weight"] = old["decoder/decoder_norm/scale"]

        # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
        if "decoder/logits_dense/kernel" in old:
            new["lm_head.weight"] = old["decoder/logits_dense/kernel"].T

    return new


def make_state_dict(converted_params, is_encoder_only: bool):
    """Prepares a state dict for the PyTorch model."""
    # Make a state dict with torch tensors.
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy())) for (k, v) in converted_params.items()])

    # Add what is missing.
    if "encoder.embed_tokens.weight" not in state_dict:
        state_dict["encoder.embed_tokens.weight"] = state_dict["shared.weight"]

    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            state_dict["decoder.embed_tokens.weight"] = state_dict["shared.weight"]

        if "lm_head.weight" not in state_dict:  # For old 1.0 models.
            print("Using shared word embeddings as lm_head.")
            state_dict["lm_head.weight"] = state_dict["shared.weight"]

    return state_dict


def load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only, scalable_attention):
    """Replaces the params in model with the T5X converted params."""
    variables = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    converted = convert_t5x_to_pytorch(
        variables, num_layers=config.num_layers, is_encoder_only=is_encoder_only, scalable_attention=scalable_attention
    )
    state_dict = make_state_dict(converted, is_encoder_only)
    model.load_state_dict(state_dict, strict=True)


def convert_t5x_checkpoint_to_pytorch(
    t5x_checkpoint_path,
    config_file,
    pytorch_dump_path,
    is_encoder_only: bool = False,
    scalable_attention: bool = False,
):
    """Loads the config and model, converts the T5X checkpoint, and saves a PyTorch checkpoint."""
    # Initialise PyTorch model
    config = MT5Config.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = UMT5EncoderModel(config)
    else:
        model = UMT5ForConditionalGeneration(config)

    # Load weights from tf checkpoint
    load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only, scalable_attention)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)

    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path)
    print("Done")


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Converts a native T5X checkpoint into a PyTorch checkpoint.")
    # Required parameters
    parser.add_argument(
        "--t5x_checkpoint_path", default=None, type=str, required=True, help="Path to the T5X checkpoint."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.",
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--is_encoder_only", action="store_true", help="Check if the model is encoder-decoder model", default=False
    )
    parser.add_argument(
        "--scalable_attention",
        action="store_true",
        help="Whether the model uses scaled attention (umt5 model)",
        default=False,
    )
    args = parser.parse_args()
    convert_t5x_checkpoint_to_pytorch(
        args.t5x_checkpoint_path,
        args.config_file,
        args.pytorch_dump_path,
        args.is_encoder_only,
        args.scalable_attention,
    )
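# Example invocation sketch (the script file name and all paths below are placeholders,
# not references to real checkpoints):
#
#   python convert_umt5_checkpoint_to_pytorch.py \
#       --t5x_checkpoint_path /path/to/t5x/checkpoint_1000000 \
#       --config_file /path/to/config.json \
#       --pytorch_dump_path /path/to/output_dir \
#       --scalable_attention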
import os
from typing import List, Optional, Union
from ...tokenization_utils import PreTrainedTokenizer
from ...tokenization_utils_base import AddedToken
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/esm2_t6_8M_UR50D": "https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt",
        "facebook/esm2_t12_35M_UR50D": "https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/esm2_t6_8M_UR50D": 1024,
    "facebook/esm2_t12_35M_UR50D": 1024,
}


def load_vocab_file(vocab_file):
    with open(vocab_file, "r") as f:
        lines = f.read().splitlines()
    return [line.strip() for line in lines]


class EsmTokenizer(PreTrainedTokenizer):
    """Constructs an ESM tokenizer."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        unk_token="<unk>",
        cls_token="<cls>",
        pad_token="<pad>",
        mask_token="<mask>",
        eos_token="<eos>",
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.all_tokens = load_vocab_file(vocab_file)
        self._id_to_token = dict(enumerate(self.all_tokens))
        self._token_to_id = {tok: ind for ind, tok in enumerate(self.all_tokens)}
        self.unk_token = unk_token
        self.cls_token = cls_token
        self.pad_token = pad_token
        self.mask_token = mask_token
        self.eos_token = eos_token
        self.unique_no_split_tokens = self.all_tokens
        self._create_trie(self.unique_no_split_tokens)

    def _convert_id_to_token(self, index: int) -> str:
        return self._id_to_token.get(index, self.unk_token)

    def _convert_token_to_id(self, token: str) -> int:
        return self._token_to_id.get(token, self._token_to_id.get(self.unk_token))

    def _tokenize(self, text, **kwargs):
        return text.split()

    def get_vocab_size(self, with_added_tokens=False) -> int:
        return len(self._id_to_token)

    def get_vocab(self):
        return {token: i for i, token in enumerate(self.all_tokens)}

    def token_to_id(self, token: str) -> int:
        return self._token_to_id.get(token, self._token_to_id.get(self.unk_token))

    def id_to_token(self, index: int) -> str:
        return self._id_to_token.get(index, self.unk_token)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        cls = [self.cls_token_id]
        sep = [self.eos_token_id]  # No sep token in ESM vocabulary
        if token_ids_1 is None:
            if self.eos_token_id is None:
                return cls + token_ids_0
            else:
                return cls + token_ids_0 + sep
        elif self.eos_token_id is None:
            raise ValueError("Cannot tokenize multiple sequences when EOS token is not set!")
        return cls + token_ids_0 + sep + token_ids_1 + sep  # Multiple inputs always have an EOS token

    def get_special_tokens_mask(
        self, token_ids_0: List, token_ids_1: Optional[List] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )

            return [1 if token in self.all_special_ids else 0 for token in token_ids_0]
        mask = [1] + ([0] * len(token_ids_0)) + [1]
        if token_ids_1 is not None:
            mask += [0] * len(token_ids_1) + [1]
        return mask

    def save_vocabulary(self, save_directory, filename_prefix):
        vocab_file = os.path.join(save_directory, (filename_prefix + "-" if filename_prefix else "") + "vocab.txt")
        with open(vocab_file, "w") as f:
            f.write("\n".join(self.all_tokens))
        return (vocab_file,)

    @property
    def vocab_size(self) -> int:
        return self.get_vocab_size(with_added_tokens=False)

    def _add_tokens(self, new_tokens: Union[List[str], List[AddedToken]], special_tokens: bool = False) -> int:
        return super()._add_tokens(new_tokens, special_tokens=True)
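# Usage sketch: every amino-acid letter in the vocabulary is also a no-split token,
# so raw protein strings tokenize per residue. Loading the checkpoint named in
# PRETRAINED_VOCAB_FILES_MAP above requires network access.
if __name__ == "__main__":
    tokenizer = EsmTokenizer.from_pretrained("facebook/esm2_t6_8M_UR50D")
    encoded = tokenizer("MKTAYIAKQR")
    print(encoded["input_ids"])  # <cls> + one id per residue + <eos>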
from __future__ import annotations
import math
class SegmentTree:
    def __init__(self, size: int) -> None:
        self.size = size
        # approximate the overall size of segment tree with given value
        self.segment_tree = [0 for i in range(0, 4 * size)]
        # create array to store lazy update
        self.lazy = [0 for i in range(0, 4 * size)]
        self.flag = [0 for i in range(0, 4 * size)]  # flag for lazy update

    def left(self, idx: int) -> int:
        return idx * 2

    def right(self, idx: int) -> int:
        return idx * 2 + 1

    def build(self, idx: int, left_element: int, right_element: int, a: list[int]) -> None:
        if left_element == right_element:
            self.segment_tree[idx] = a[left_element - 1]
        else:
            mid = (left_element + right_element) // 2
            self.build(self.left(idx), left_element, mid, a)
            self.build(self.right(idx), mid + 1, right_element, a)
            self.segment_tree[idx] = max(
                self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)]
            )

    def update(self, idx: int, left_element: int, right_element: int, a: int, b: int, val: int) -> bool:
        """Assign `val` to every element of [a, b] in O(log n) via lazy propagation."""
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True

        if right_element < a or left_element > b:
            return True
        if left_element >= a and right_element <= b:
            self.segment_tree[idx] = val
            if left_element != right_element:
                self.lazy[self.left(idx)] = val
                self.lazy[self.right(idx)] = val
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
            return True
        mid = (left_element + right_element) // 2
        self.update(self.left(idx), left_element, mid, a, b, val)
        self.update(self.right(idx), mid + 1, right_element, a, b, val)
        self.segment_tree[idx] = max(
            self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)]
        )
        return True

    def query(self, idx: int, left_element: int, right_element: int, a: int, b: int) -> int | float:
        """Return the maximum of [a, b] in O(log n), pushing pending lazy updates down."""
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
        if right_element < a or left_element > b:
            return -math.inf
        if left_element >= a and right_element <= b:
            return self.segment_tree[idx]
        mid = (left_element + right_element) // 2
        q1 = self.query(self.left(idx), left_element, mid, a, b)
        q2 = self.query(self.right(idx), mid + 1, right_element, a, b)
        return max(q1, q2)

    def __str__(self) -> str:
        return str([self.query(1, 1, self.size, i, i) for i in range(1, self.size + 1)])


if __name__ == "__main__":
    A = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8]
    size = 15
    segt = SegmentTree(size)
    segt.build(1, 1, size, A)
    print(segt.query(1, 1, size, 4, 6))
    print(segt.query(1, 1, size, 7, 11))
    print(segt.query(1, 1, size, 7, 12))
    segt.update(1, 1, size, 1, 3, 111)
    print(segt.query(1, 1, size, 1, 15))
    segt.update(1, 1, size, 7, 8, 235)
    print(segt)
from scipy.stats import pearsonr
import datasets
SCREAMING_SNAKE_CASE__ = """
Pearson correlation coefficient and p-value for testing non-correlation.
The Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.
The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.
"""
SCREAMING_SNAKE_CASE__ = """
Args:
predictions (`list` of `int`): Predicted class labels, as returned by a model.
references (`list` of `int`): Ground truth labels.
return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.
Returns:
pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.
p-value (`float`): P-value, which roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.
Examples:
Example 1-A simple example using only predictions and references.
>>> pearsonr_metric = datasets.load_metric(\"pearsonr\")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])
>>> print(round(results['pearsonr'], 2))
-0.74
Example 2-The same as Example 1, but that also returns the `p-value`.
>>> pearsonr_metric = datasets.load_metric(\"pearsonr\")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)
>>> print(sorted(list(results.keys())))
['p-value', 'pearsonr']
>>> print(round(results['pearsonr'], 2))
-0.74
>>> print(round(results['p-value'], 2))
0.15
"""
SCREAMING_SNAKE_CASE__ = """
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, Ilhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Antonio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Pearsonr(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("float"),
                    "references": datasets.Value("float"),
                }
            ),
            reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html"],
        )

    def _compute(self, predictions, references, return_pvalue=False):
        if return_pvalue:
            results = pearsonr(references, predictions)
            return {"pearsonr": results[0], "p-value": results[1]}
        else:
            return {"pearsonr": float(pearsonr(references, predictions)[0])}
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
convert,
ensure_valid_input,
generate_identified_filename,
infer_shapes,
quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow
class FuncContiguousArgs:
    def forward(self, input_ids, token_type_ids, attention_mask):
        return None


class FuncNonContiguousArgs:
    def forward(self, input_ids, some_other_args, token_type_ids, attention_mask):
        return None


class OnnxExportTestCase(unittest.TestCase):
    MODEL_TO_TEST = [
        # (model_name, model_kwargs)
        ("bert-base-cased", {}),
        ("gpt2", {"use_cache": False}),  # We don't support exporting GPT2 past keys anymore
    ]

    @require_tf
    @slow
    def test_export_tensorflow(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(model, "tf", 12, **model_kwargs)

    @require_torch
    @slow
    def test_export_pytorch(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(model, "pt", 12, **model_kwargs)

    @require_torch
    @slow
    def test_export_custom_bert_model(self):
        from transformers import BertModel

        vocab = ["[UNK]", "[SEP]", "[CLS]", "[PAD]", "[MASK]", "some", "other", "words"]
        with NamedTemporaryFile(mode="w+t") as vocab_file:
            vocab_file.write("\n".join(vocab))
            vocab_file.flush()
            tokenizer = BertTokenizerFast(vocab_file.name)

            with TemporaryDirectory() as bert_save_dir:
                model = BertModel(BertConfig(vocab_size=len(vocab)))
                model.save_pretrained(bert_save_dir)
                self._test_export(bert_save_dir, "pt", 12, tokenizer)

    @require_tf
    @slow
    def test_quantize_tf(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            path = self._test_export(model, "tf", 12, **model_kwargs)
            quantized_path = quantize(Path(path))

            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(path).stat().st_size:
                self.fail("Quantized model is bigger than initial ONNX model")

    @require_torch
    @slow
    def test_quantize_pytorch(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            path = self._test_export(model, "pt", 12, **model_kwargs)
            quantized_path = quantize(path)

            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(path).stat().st_size:
                self.fail("Quantized model is bigger than initial ONNX model")

    def _test_export(self, model, framework, opset, tokenizer=None, **model_kwargs):
        try:
            # Compute path
            with TemporaryDirectory() as tempdir:
                path = Path(tempdir).joinpath("model.onnx")

            # Remove folder if exists
            if path.parent.exists():
                path.parent.rmdir()

            # Export
            convert(framework, model, path, opset, tokenizer, **model_kwargs)

            return path
        except Exception as e:
            self.fail(e)

    @require_torch
    @require_tokenizers
    @slow
    def test_infer_dynamic_axis_pytorch(self):
        from transformers import BertModel

        model = BertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random"))
        tokenizer = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random")
        self._test_infer_dynamic_axis(model, tokenizer, "pt")

    @require_tf
    @require_tokenizers
    @slow
    def test_infer_dynamic_axis_tf(self):
        from transformers import TFBertModel

        model = TFBertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random"))
        tokenizer = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random")
        self._test_infer_dynamic_axis(model, tokenizer, "tf")

    def _test_infer_dynamic_axis(self, model, tokenizer, framework):
        nlp = FeatureExtractionPipeline(model, tokenizer)

        variable_names = ["input_ids", "token_type_ids", "attention_mask", "output_0", "output_1"]
        input_vars, output_vars, shapes, tokens = infer_shapes(nlp, framework)

        # Assert all variables are present
        self.assertEqual(len(shapes), len(variable_names))
        self.assertTrue(all(var_name in shapes for var_name in variable_names))
        self.assertSequenceEqual(variable_names[:3], input_vars)
        self.assertSequenceEqual(variable_names[3:], output_vars)

        # Assert inputs are {0: batch, 1: sequence}
        for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
            self.assertDictEqual(shapes[var_name], {0: "batch", 1: "sequence"})

        # Assert outputs are {0: batch, 1: sequence} and {0: batch}
        self.assertDictEqual(shapes["output_0"], {0: "batch", 1: "sequence"})
        self.assertDictEqual(shapes["output_1"], {0: "batch"})

    def test_ensure_valid_input(self):
        input_names = ["input_ids", "attention_mask", "token_type_ids"]
        tokens = {"input_ids": [1, 2, 3, 4], "attention_mask": [0, 0, 0, 0], "token_type_ids": [1, 1, 1, 1]}
        ordered_input_names, inputs_args = ensure_valid_input(FuncContiguousArgs(), tokens, input_names)

        # Should have exactly the same number of args (all are valid)
        self.assertEqual(len(inputs_args), 3)

        # Should have exactly the same input names
        self.assertEqual(set(ordered_input_names), set(input_names))

        # Parameter should be reordered according to their respective place in the function:
        # (input_ids, token_type_ids, attention_mask)
        self.assertEqual(inputs_args, (tokens["input_ids"], tokens["token_type_ids"], tokens["attention_mask"]))

        # Generated args are interleaved with another args (for instance parameter "past" in GPT2)
        ordered_input_names, inputs_args = ensure_valid_input(FuncNonContiguousArgs(), tokens, input_names)

        # Should have exactly the one arg (all before the one not provided "some_other_args")
        self.assertEqual(len(inputs_args), 1)
        self.assertEqual(len(ordered_input_names), 1)

        # Should have only "input_ids"
        self.assertEqual(inputs_args[0], tokens["input_ids"])
        self.assertEqual(ordered_input_names[0], "input_ids")

    def test_generate_identified_name(self):
        generated = generate_identified_filename(Path("/home/something/my_fake_model.onnx"), "-test")
        self.assertEqual("/home/something/my_fake_model-test.onnx", generated.as_posix())
from unittest import TestCase
from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset
class DatasetListTest(TestCase):
    def _create_example_records(self):
        return [
            {"col_1": 3, "col_2": "a"},
            {"col_1": 2, "col_2": "b"},
            {"col_1": 1, "col_2": "c"},
            {"col_1": 0, "col_2": "d"},
        ]

    def _create_example_dataset(self):
        data = {"col_1": [3, 2, 1, 0], "col_2": ["a", "b", "c", "d"]}
        return Dataset.from_dict(data)

    def test_create(self):
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records)
        self.assertListEqual(dset.column_names, ["col_1", "col_2"])
        for i, r in enumerate(dset):
            self.assertDictEqual(r, example_records[i])

    def test_list_dict_equivalent(self):
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records)
        dset_from_dict = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]})
        self.assertEqual(dset.info, dset_from_dict.info)

    def test_uneven_records(self):  # checks what happens with missing columns
        uneven_records = [{"col_1": 1}, {"col_2": "x"}]
        dset = Dataset.from_list(uneven_records)
        self.assertDictEqual(dset[0], {"col_1": 1})
        self.assertDictEqual(dset[1], {"col_1": None})  # NB: first record is used for columns

    def test_variable_list_records(self):  # checks if the type can be inferred from the second record
        list_records = [{"col_1": []}, {"col_1": [1, 2]}]
        dset = Dataset.from_list(list_records)
        self.assertEqual(dset.info.features["col_1"], Sequence(Value("int64")))

    def test_create_empty(self):
        dset = Dataset.from_list([])
        self.assertEqual(len(dset), 0)
        self.assertListEqual(dset.column_names, [])
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.speecht5 import SpeechT5ForTextToSpeech, SpeechT5HifiGan, SpeechT5Processor
from ..utils import is_datasets_available
from .base import PipelineTool


if is_datasets_available():
    from datasets import load_dataset


class TextToSpeechTool(PipelineTool):
    default_checkpoint = "microsoft/speecht5_tts"
    description = (
        "This is a tool that reads an English text out loud. It takes an input named `text` which should contain the "
        "text to read (in English) and returns a waveform object containing the sound."
    )
    name = "text_reader"
    pre_processor_class = SpeechT5Processor
    model_class = SpeechT5ForTextToSpeech
    post_processor_class = SpeechT5HifiGan

    inputs = ["text"]
    outputs = ["audio"]

    def setup(self):
        if self.post_processor is None:
            self.post_processor = "microsoft/speecht5_hifigan"
        super().setup()

    def encode(self, text, speaker_embeddings=None):
        inputs = self.pre_processor(text=text, return_tensors="pt", truncation=True)

        if speaker_embeddings is None:
            if not is_datasets_available():
                raise ImportError("Datasets needs to be installed if not passing speaker embeddings.")

            embeddings_dataset = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation")
            speaker_embeddings = torch.tensor(embeddings_dataset[7305]["xvector"]).unsqueeze(0)

        return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings}

    def forward(self, inputs):
        with torch.no_grad():
            return self.model.generate_speech(**inputs)

    def decode(self, outputs):
        with torch.no_grad():
            return self.post_processor(outputs).cpu().detach()
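# Equivalent standalone sketch without the tool wrapper, using the same checkpoints the
# class above points at (requires torch, transformers and datasets plus network access);
# shown as comments since it belongs outside this module:
#
#     from transformers import SpeechT5ForTextToSpeech, SpeechT5HifiGan, SpeechT5Processor
#     processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_tts")
#     model = SpeechT5ForTextToSpeech.from_pretrained("microsoft/speecht5_tts")
#     vocoder = SpeechT5HifiGan.from_pretrained("microsoft/speecht5_hifigan")
#     inputs = processor(text="Hello world", return_tensors="pt")
#     embeddings = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation")
#     speaker = torch.tensor(embeddings[7305]["xvector"]).unsqueeze(0)
#     speech = model.generate_speech(inputs["input_ids"], speaker, vocoder=vocoder)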
import gc
import unittest
import numpy as np
import torch
from diffusers import (
AudioDiffusionPipeline,
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
DiffusionPipeline,
Mel,
    UNet2DConditionModel,
    UNet2DModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class __lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
def A__ ( self ) -> Any:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def A__ ( self ) -> Tuple:
'''simple docstring'''
torch.manual_seed(0 )
lowercase_ = UNetaDModel(
sample_size=(32, 64) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=("AttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "AttnUpBlock2D") , )
return model
@property
def A__ ( self ) -> Tuple:
'''simple docstring'''
torch.manual_seed(0 )
lowercase_ = UNetaDConditionModel(
sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=("CrossAttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "CrossAttnUpBlock2D") , cross_attention_dim=10 , )
return model
@property
def A__ ( self ) -> Optional[Any]:
'''simple docstring'''
torch.manual_seed(0 )
lowercase_ = AutoencoderKL(
sample_size=(128, 64) , in_channels=1 , out_channels=1 , latent_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=("DownEncoderBlock2D", "DownEncoderBlock2D") , up_block_types=("UpDecoderBlock2D", "UpDecoderBlock2D") , )
lowercase_ = UNetaDModel(
sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=("AttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "AttnUpBlock2D") , )
return vqvae, unet
@slow
def A__ ( self ) -> Union[str, Any]:
'''simple docstring'''
lowercase_ = "cpu" # ensure determinism for the device-dependent torch.Generator
lowercase_ = Mel(
x_res=self.dummy_unet.config.sample_size[1] , y_res=self.dummy_unet.config.sample_size[0] , )
lowercase_ = DDPMScheduler()
lowercase_ = AudioDiffusionPipeline(vqvae=UpperCAmelCase , unet=self.dummy_unet , mel=UpperCAmelCase , scheduler=UpperCAmelCase )
lowercase_ = pipe.to(UpperCAmelCase )
pipe.set_progress_bar_config(disable=UpperCAmelCase )
lowercase_ = torch.Generator(device=UpperCAmelCase ).manual_seed(42 )
lowercase_ = pipe(generator=UpperCAmelCase , steps=4 )
lowercase_ = output.audios[0]
lowercase_ = output.images[0]
lowercase_ = torch.Generator(device=UpperCAmelCase ).manual_seed(42 )
lowercase_ = pipe(generator=UpperCAmelCase , steps=4 , return_dict=UpperCAmelCase )
lowercase_ = output[0][0]
assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length)
assert (
image.height == self.dummy_unet.config.sample_size[0]
and image.width == self.dummy_unet.config.sample_size[1]
)
lowercase_ = np.frombuffer(image.tobytes() , dtype="uint8" )[:10]
lowercase_ = np.frombuffer(image_from_tuple.tobytes() , dtype="uint8" )[:10]
lowercase_ = np.array([69, 255, 255, 255, 0, 0, 77, 181, 12, 127] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() == 0
lowercase_ = Mel(
x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1] , y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0] , )
lowercase_ = DDIMScheduler()
lowercase_ = self.dummy_vqvae_and_unet
lowercase_ = AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0] , unet=dummy_vqvae_and_unet[1] , mel=UpperCAmelCase , scheduler=UpperCAmelCase )
lowercase_ = pipe.to(UpperCAmelCase )
pipe.set_progress_bar_config(disable=UpperCAmelCase )
np.random.seed(0 )
lowercase_ = np.random.uniform(-1 , 1 , ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,) )
lowercase_ = torch.Generator(device=UpperCAmelCase ).manual_seed(42 )
lowercase_ = pipe(raw_audio=UpperCAmelCase , generator=UpperCAmelCase , start_step=5 , steps=10 )
lowercase_ = output.images[0]
assert (
image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0]
and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1]
)
lowercase_ = np.frombuffer(image.tobytes() , dtype="uint8" )[:10]
lowercase_ = np.array([120, 117, 110, 109, 138, 167, 138, 148, 132, 121] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
lowercase_ = self.dummy_unet_condition
lowercase_ = AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0] , unet=UpperCAmelCase , mel=UpperCAmelCase , scheduler=UpperCAmelCase )
lowercase_ = pipe.to(UpperCAmelCase )
pipe.set_progress_bar_config(disable=UpperCAmelCase )
np.random.seed(0 )
lowercase_ = torch.rand((1, 1, 10) )
lowercase_ = pipe(generator=UpperCAmelCase , encoding=UpperCAmelCase )
lowercase_ = output.images[0]
lowercase_ = np.frombuffer(image.tobytes() , dtype="uint8" )[:10]
lowercase_ = np.array([107, 103, 120, 127, 142, 122, 113, 122, 97, 111] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
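    # Usage sketch (added; mirrors the checkpoint exercised by the GPU test below):
    #   pipe = DiffusionPipeline.from_pretrained("teticio/audio-diffusion-ddim-256")
    #   output = pipe(generator=torch.Generator().manual_seed(42))
    #   image, audio = output.images[0], output.audios[0]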
@slow
@require_torch_gpu
class __lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
    def tearDown(self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_audio_diffusion(self ):
'''simple docstring'''
        device = torch_device
        pipe = DiffusionPipeline.from_pretrained("teticio/audio-diffusion-ddim-256" )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        generator = torch.Generator(device=device ).manual_seed(42 )
        output = pipe(generator=generator )
        audio = output.audios[0]
        image = output.images[0]
        assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length)
        assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1]
        image_slice = np.frombuffer(image.tobytes() , dtype="uint8" )[:10]
        expected_slice = np.array([151, 167, 154, 144, 122, 134, 121, 105, 70, 26] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
def topological_sort(graph):
    """Kahn's algorithm: print a topological ordering of the DAG, or report a cycle."""
    indegree = [0] * len(graph)
    queue = []
    topo = []
    cnt = 0
    # count incoming edges for every vertex
    for values in graph.values():
        for i in values:
            indegree[i] += 1
    # vertices with no incoming edges can be emitted first
    for i in range(len(graph) ):
        if indegree[i] == 0:
            queue.append(i)
    while queue:
        vertex = queue.pop(0 )
        cnt += 1
        topo.append(vertex)
        for x in graph[vertex]:
            indegree[x] -= 1
            if indegree[x] == 0:
                queue.append(x)
    if cnt != len(graph):
        print("Cycle exists" )
    else:
        print(topo)
# Adjacency List of Graph
graph = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
topological_sort(graph)
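# Note (added): Kahn's algorithm runs in O(V + E); using collections.deque with
# popleft() instead of list.pop(0) would avoid the O(V) cost of each pop.
# For the adjacency list above the printed ordering is [0, 1, 2, 3, 4, 5].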
def perfect(number: int) -> bool:
    """Return True if `number` equals the sum of its proper divisors."""
    return sum(i for i in range(1 , number // 2 + 1 ) if number % i == 0 ) == number
if __name__ == "__main__":
print("""Program to check whether a number is a Perfect number or not...""")
    number = int(input("""Enter number: """).strip())
print(f"""{number} is {'' if perfect(number) else 'not '}a Perfect Number.""")
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""}
SCREAMING_SNAKE_CASE__ = {
"""tokenizer_file""": {
"""EleutherAI/gpt-neox-20b""": """https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json""",
},
}
SCREAMING_SNAKE_CASE__ = {
"""gpt-neox-20b""": 2_0_4_8,
}
class __lowerCamelCase ( snake_case_ ):
"""simple docstring"""
lowerCAmelCase__ = VOCAB_FILES_NAMES
lowerCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase__ = ["input_ids", "attention_mask"]
def __init__( self , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase="<|endoftext|>" , UpperCAmelCase="<|endoftext|>" , UpperCAmelCase="<|endoftext|>" , UpperCAmelCase=False , **UpperCAmelCase , ) -> List[Any]:
'''simple docstring'''
super().__init__(
UpperCAmelCase , UpperCAmelCase , tokenizer_file=UpperCAmelCase , unk_token=UpperCAmelCase , bos_token=UpperCAmelCase , eos_token=UpperCAmelCase , add_prefix_space=UpperCAmelCase , **UpperCAmelCase , )
lowercase_ = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space" , UpperCAmelCase ) != add_prefix_space:
lowercase_ = getattr(UpperCAmelCase , pre_tok_state.pop("type" ) )
lowercase_ = add_prefix_space
lowercase_ = pre_tok_class(**UpperCAmelCase )
lowercase_ = add_prefix_space
def A__ ( self , UpperCAmelCase , UpperCAmelCase = None ) -> Tuple[str]:
'''simple docstring'''
lowercase_ = self._tokenizer.model.save(UpperCAmelCase , name=UpperCAmelCase )
return tuple(UpperCAmelCase )
def A__ ( self , UpperCAmelCase ) -> List[int]:
'''simple docstring'''
lowercase_ = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase ) + [self.eos_token_id] )
if len(UpperCAmelCase ) > self.model_max_length:
lowercase_ = input_ids[-self.model_max_length :]
return input_ids
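# Minimal usage sketch (added; assumes the class above is the published
# GPTNeoXTokenizerFast and the Hub checkpoint is reachable):
#   tokenizer = GPTNeoXTokenizerFast.from_pretrained("EleutherAI/gpt-neox-20b")
#   input_ids = tokenizer("Hello world").input_ids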
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __lowerCamelCase :
"""simple docstring"""
def __init__( self , UpperCAmelCase , UpperCAmelCase=13 , UpperCAmelCase=32 , UpperCAmelCase=2 , UpperCAmelCase=3 , UpperCAmelCase=16 , UpperCAmelCase=[32, 64, 128] , UpperCAmelCase=[1, 2, 1] , UpperCAmelCase=[2, 2, 4] , UpperCAmelCase=2 , UpperCAmelCase=2.0 , UpperCAmelCase=True , UpperCAmelCase=0.0 , UpperCAmelCase=0.0 , UpperCAmelCase=0.1 , UpperCAmelCase="gelu" , UpperCAmelCase=False , UpperCAmelCase=True , UpperCAmelCase=0.02 , UpperCAmelCase=1e-5 , UpperCAmelCase=True , UpperCAmelCase=None , UpperCAmelCase=True , UpperCAmelCase=10 , UpperCAmelCase=8 , UpperCAmelCase=["stage1", "stage2"] , UpperCAmelCase=[1, 2] , ) -> Optional[int]:
'''simple docstring'''
lowercase_ = parent
lowercase_ = batch_size
lowercase_ = image_size
lowercase_ = patch_size
lowercase_ = num_channels
lowercase_ = embed_dim
lowercase_ = hidden_sizes
lowercase_ = depths
lowercase_ = num_heads
lowercase_ = window_size
lowercase_ = mlp_ratio
lowercase_ = qkv_bias
lowercase_ = hidden_dropout_prob
lowercase_ = attention_probs_dropout_prob
lowercase_ = drop_path_rate
lowercase_ = hidden_act
lowercase_ = use_absolute_embeddings
lowercase_ = patch_norm
lowercase_ = layer_norm_eps
lowercase_ = initializer_range
lowercase_ = is_training
lowercase_ = scope
lowercase_ = use_labels
lowercase_ = type_sequence_label_size
lowercase_ = encoder_stride
lowercase_ = out_features
lowercase_ = out_indices
def A__ ( self ) -> Optional[Any]:
'''simple docstring'''
lowercase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase_ = None
if self.use_labels:
lowercase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase_ = self.get_config()
return config, pixel_values, labels
def A__ ( self ) -> Optional[int]:
'''simple docstring'''
return FocalNetConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , hidden_sizes=self.hidden_sizes , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def A__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> List[str]:
'''simple docstring'''
lowercase_ = FocalNetModel(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
lowercase_ = model(UpperCAmelCase )
lowercase_ = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
lowercase_ = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def A__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Optional[int]:
'''simple docstring'''
lowercase_ = FocalNetBackbone(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
lowercase_ = model(UpperCAmelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size, 8, 8] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[:-1] )
# verify backbone works with out_features=None
lowercase_ = None
lowercase_ = FocalNetBackbone(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
lowercase_ = model(UpperCAmelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size * 2, 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def A__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
lowercase_ = FocalNetForMaskedImageModeling(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
lowercase_ = model(UpperCAmelCase )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
lowercase_ = 1
lowercase_ = FocalNetForMaskedImageModeling(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
lowercase_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowercase_ = model(UpperCAmelCase )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def A__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> List[Any]:
'''simple docstring'''
lowercase_ = self.type_sequence_label_size
lowercase_ = FocalNetForImageClassification(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
lowercase_ = model(UpperCAmelCase , labels=UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
lowercase_ = 1
lowercase_ = FocalNetForImageClassification(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
lowercase_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowercase_ = model(UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def A__ ( self ) -> Optional[int]:
'''simple docstring'''
lowercase_ = self.prepare_config_and_inputs()
lowercase_ , lowercase_ , lowercase_ = config_and_inputs
lowercase_ = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class __lowerCamelCase ( snake_case_ , snake_case_ , unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase__ = (
(
FocalNetModel,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetBackbone,
)
if is_torch_available()
else ()
)
lowerCAmelCase__ = (
{"feature-extraction": FocalNetModel, "image-classification": FocalNetForImageClassification}
if is_torch_available()
else {}
)
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
    def setUp(self ):
        '''simple docstring'''
        self.model_tester = FocalNetModelTester(self )
        self.config_tester = ConfigTester(self , config_class=FocalNetConfig , embed_dim=37 , has_text_modality=False )
def A__ ( self ) -> List[str]:
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def A__ ( self ) -> Optional[Any]:
'''simple docstring'''
return
def A__ ( self ) -> Optional[Any]:
'''simple docstring'''
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase )
def A__ ( self ) -> str:
'''simple docstring'''
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*UpperCAmelCase )
def A__ ( self ) -> Dict:
'''simple docstring'''
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*UpperCAmelCase )
def A__ ( self ) -> Optional[Any]:
'''simple docstring'''
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCAmelCase )
@unittest.skip(reason="FocalNet does not use inputs_embeds" )
def A__ ( self ) -> Dict:
'''simple docstring'''
pass
@unittest.skip(reason="FocalNet does not use feedforward chunking" )
def A__ ( self ) -> Tuple:
'''simple docstring'''
pass
def A__ ( self ) -> str:
'''simple docstring'''
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
lowercase_ = model_class(UpperCAmelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowercase_ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCAmelCase , nn.Linear ) )
def A__ ( self ) -> Any:
'''simple docstring'''
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
lowercase_ = model_class(UpperCAmelCase )
lowercase_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase_ = [*signature.parameters.keys()]
lowercase_ = ["pixel_values"]
self.assertListEqual(arg_names[:1] , UpperCAmelCase )
def A__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> int:
'''simple docstring'''
lowercase_ = model_class(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
with torch.no_grad():
lowercase_ = model(**self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) )
lowercase_ = outputs.hidden_states
lowercase_ = getattr(
self.model_tester , "expected_num_hidden_layers" , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(UpperCAmelCase ) , UpperCAmelCase )
# FocalNet has a different seq_length
lowercase_ = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
lowercase_ = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
lowercase_ = outputs.reshaped_hidden_states
self.assertEqual(len(UpperCAmelCase ) , UpperCAmelCase )
lowercase_ , lowercase_ , lowercase_ , lowercase_ = reshaped_hidden_states[0].shape
lowercase_ = (
reshaped_hidden_states[0].view(UpperCAmelCase , UpperCAmelCase , height * width ).permute(0 , 2 , 1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def A__ ( self ) -> List[str]:
'''simple docstring'''
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
lowercase_ = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes[:-1]:
lowercase_ = True
self.check_hidden_states_output(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase_ = True
self.check_hidden_states_output(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
def A__ ( self ) -> Tuple:
'''simple docstring'''
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
lowercase_ = 3
lowercase_ = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
lowercase_ = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
lowercase_ = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
lowercase_ = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes[:-1]:
lowercase_ = True
self.check_hidden_states_output(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase_ = True
self.check_hidden_states_output(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , (padded_height, padded_width) )
@slow
def A__ ( self ) -> Optional[int]:
'''simple docstring'''
for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase_ = FocalNetModel.from_pretrained(UpperCAmelCase )
self.assertIsNotNone(UpperCAmelCase )
def A__ ( self ) -> List[str]:
'''simple docstring'''
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
lowercase_ = _config_zero_init(UpperCAmelCase )
for model_class in self.all_model_classes:
lowercase_ = model_class(config=UpperCAmelCase )
for name, param in model.named_parameters():
if "embeddings" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F'Parameter {name} of model {model_class} seems not properly initialized' , )
@require_vision
@require_torch
class __lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def A__ ( self ) -> List[str]:
'''simple docstring'''
return AutoImageProcessor.from_pretrained("microsoft/focalnet-tiny" ) if is_vision_available() else None
@slow
def A__ ( self ) -> Tuple:
'''simple docstring'''
lowercase_ = FocalNetForImageClassification.from_pretrained("microsoft/focalnet-tiny" ).to(UpperCAmelCase )
lowercase_ = self.default_image_processor
lowercase_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
lowercase_ = image_processor(images=UpperCAmelCase , return_tensors="pt" ).to(UpperCAmelCase )
# forward pass
with torch.no_grad():
lowercase_ = model(**UpperCAmelCase )
# verify the logits
lowercase_ = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , UpperCAmelCase )
lowercase_ = torch.tensor([0.2166, -0.4368, 0.2191] ).to(UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCAmelCase , atol=1e-4 ) )
        self.assertEqual(outputs.logits.argmax(dim=-1 ).item() , 281 )
@require_torch
class __lowerCamelCase ( snake_case_ , unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase__ = (FocalNetBackbone,) if is_torch_available() else ()
lowerCAmelCase__ = FocalNetConfig
lowerCAmelCase__ = False
    def setUp(self ):
        '''simple docstring'''
        self.model_tester = FocalNetModelTester(self )
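# Usage sketch (added; "microsoft/focalnet-tiny" is the checkpoint exercised by
# the integration test above; `image` stands for any PIL image):
#   processor = AutoImageProcessor.from_pretrained("microsoft/focalnet-tiny")
#   model = FocalNetForImageClassification.from_pretrained("microsoft/focalnet-tiny")
#   logits = model(**processor(images=image, return_tensors="pt")).logits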
import argparse
import json
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinConfig, SwinForImageClassification
def get_swin_config(swin_name):
    '''simple docstring'''
    config = SwinConfig()
    name_split = swin_name.split("_" )
    model_size = name_split[1]
    img_size = int(name_split[4] )
    window_size = int(name_split[3][-1] )
    if model_size == "tiny":
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "small":
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "base":
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    else:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
    if "in22k" in swin_name:
        num_classes = 2_1841
    else:
        num_classes = 1000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    idalabel = json.load(open(hf_hub_download(repo_id , filename , repo_type="dataset" ) , "r" ) )
    idalabel = {int(k ): v for k, v in idalabel.items()}
    config.id2label = idalabel
    config.label2id = {v: k for k, v in idalabel.items()}
    config.image_size = img_size
    config.num_labels = num_classes
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads
    config.window_size = window_size
    return config
def rename_key(name):
    '''simple docstring'''
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" )
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm" , "embeddings.norm" )
    if "layers" in name:
        name = "encoder." + name
    if "attn.proj" in name:
        name = name.replace("attn.proj" , "attention.output.dense" )
    if "attn" in name:
        name = name.replace("attn" , "attention.self" )
    if "norm1" in name:
        name = name.replace("norm1" , "layernorm_before" )
    if "norm2" in name:
        name = name.replace("norm2" , "layernorm_after" )
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1" , "intermediate.dense" )
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2" , "output.dense" )
    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"
    if "head" in name:
        name = name.replace("head" , "classifier" )
    else:
        name = "swin." + name
    return name
def convert_state_dict(orig_state_dict , model):
    '''simple docstring'''
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key )
        if "mask" in key:
            continue
        elif "qkv" in key:
            key_split = key.split("." )
            layer_num = int(key_split[1] )
            block_num = int(key_split[3] )
            dim = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
            if "weight" in key:
                orig_state_dict[F'swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight'] = val[:dim, :]
                orig_state_dict[F'swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight'] = val[
                    dim : dim * 2, :
                ]
                orig_state_dict[F'swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight'] = val[-dim:, :]
            else:
                orig_state_dict[F'swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias'] = val[
                    :dim
                ]
                orig_state_dict[F'swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias'] = val[
                    dim : dim * 2
                ]
                orig_state_dict[F'swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias'] = val[
                    -dim:
                ]
        else:
            orig_state_dict[rename_key(key )] = val
    return orig_state_dict
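# Note (added): timm stores each block's attention as one fused qkv matrix whose
# first dimension stacks query, key and value; the slices above (val[:dim],
# val[dim : dim * 2], val[-dim:]) split it into the separate projections that the
# Hugging Face Swin layout expects.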
def convert_swin_checkpoint(swin_name , pytorch_dump_folder_path):
    '''simple docstring'''
    timm_model = timm.create_model(swin_name , pretrained=True )
    timm_model.eval()
    config = get_swin_config(swin_name )
    model = SwinForImageClassification(config )
    model.eval()
    new_state_dict = convert_state_dict(timm_model.state_dict() , model )
    model.load_state_dict(new_state_dict )
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image_processor = AutoImageProcessor.from_pretrained("microsoft/{}".format(swin_name.replace("_" , "-" ) ) )
    image = Image.open(requests.get(url , stream=True ).raw )
    inputs = image_processor(images=image , return_tensors="pt" )
    timm_outs = timm_model(inputs["pixel_values"] )
    hf_outs = model(**inputs ).logits
    assert torch.allclose(timm_outs , hf_outs , atol=1E-3 )
    print(F'Saving model {swin_name} to {pytorch_dump_folder_path}' )
    model.save_pretrained(pytorch_dump_folder_path )
    print(F'Saving image processor to {pytorch_dump_folder_path}' )
    image_processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--swin_name""",
default="""swin_tiny_patch4_window7_224""",
type=str,
help="""Name of the Swin timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
SCREAMING_SNAKE_CASE__ = parser.parse_args()
convert_swin_checkpoint(args.swin_name, args.pytorch_dump_folder_path)
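# Example invocation (added; the script name is a placeholder):
#   python convert_swin_timm_to_pytorch.py \
#       --swin_name swin_tiny_patch4_window7_224 \
#       --pytorch_dump_folder_path ./swin-tiny-patch4-window7-224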
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_gpta import GPTaTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""}
SCREAMING_SNAKE_CASE__ = {
"""vocab_file""": {
"""gpt2""": """https://huggingface.co/gpt2/resolve/main/vocab.json""",
"""gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/vocab.json""",
"""gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/vocab.json""",
"""gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/vocab.json""",
"""distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/vocab.json""",
},
"""merges_file""": {
"""gpt2""": """https://huggingface.co/gpt2/resolve/main/merges.txt""",
"""gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/merges.txt""",
"""gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/merges.txt""",
"""gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/merges.txt""",
"""distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/merges.txt""",
},
"""tokenizer_file""": {
"""gpt2""": """https://huggingface.co/gpt2/resolve/main/tokenizer.json""",
"""gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json""",
"""gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/tokenizer.json""",
"""gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json""",
"""distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/tokenizer.json""",
},
}
SCREAMING_SNAKE_CASE__ = {
"""gpt2""": 1_0_2_4,
"""gpt2-medium""": 1_0_2_4,
"""gpt2-large""": 1_0_2_4,
"""gpt2-xl""": 1_0_2_4,
"""distilgpt2""": 1_0_2_4,
}
class __lowerCamelCase ( snake_case_ ):
"""simple docstring"""
lowerCAmelCase__ = VOCAB_FILES_NAMES
lowerCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase__ = ["input_ids", "attention_mask"]
lowerCAmelCase__ = GPTaTokenizer
def __init__( self , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase="<|endoftext|>" , UpperCAmelCase="<|endoftext|>" , UpperCAmelCase="<|endoftext|>" , UpperCAmelCase=False , **UpperCAmelCase , ) -> Optional[Any]:
'''simple docstring'''
super().__init__(
UpperCAmelCase , UpperCAmelCase , tokenizer_file=UpperCAmelCase , unk_token=UpperCAmelCase , bos_token=UpperCAmelCase , eos_token=UpperCAmelCase , add_prefix_space=UpperCAmelCase , **UpperCAmelCase , )
lowercase_ = kwargs.pop("add_bos_token" , UpperCAmelCase )
lowercase_ = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space" , UpperCAmelCase ) != add_prefix_space:
lowercase_ = getattr(UpperCAmelCase , pre_tok_state.pop("type" ) )
lowercase_ = add_prefix_space
lowercase_ = pre_tok_class(**UpperCAmelCase )
lowercase_ = add_prefix_space
def A__ ( self , *UpperCAmelCase , **UpperCAmelCase ) -> BatchEncoding:
'''simple docstring'''
lowercase_ = kwargs.get("is_split_into_words" , UpperCAmelCase )
assert self.add_prefix_space or not is_split_into_words, (
F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*UpperCAmelCase , **UpperCAmelCase )
def A__ ( self , *UpperCAmelCase , **UpperCAmelCase ) -> BatchEncoding:
'''simple docstring'''
lowercase_ = kwargs.get("is_split_into_words" , UpperCAmelCase )
assert self.add_prefix_space or not is_split_into_words, (
F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs."
)
return super()._encode_plus(*UpperCAmelCase , **UpperCAmelCase )
def A__ ( self , UpperCAmelCase , UpperCAmelCase = None ) -> Tuple[str]:
'''simple docstring'''
lowercase_ = self._tokenizer.model.save(UpperCAmelCase , name=UpperCAmelCase )
return tuple(UpperCAmelCase )
def A__ ( self , UpperCAmelCase ) -> List[int]:
'''simple docstring'''
lowercase_ = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase ) + [self.eos_token_id] )
if len(UpperCAmelCase ) > self.model_max_length:
lowercase_ = input_ids[-self.model_max_length :]
return input_ids
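# Behavioural note (added): pre-tokenized input requires add_prefix_space=True, as
# the asserts above enforce. A sketch, assuming the public class name
# GPT2TokenizerFast:
#   tokenizer = GPT2TokenizerFast.from_pretrained("gpt2", add_prefix_space=True)
#   tokenizer(["Hello", "world"], is_split_into_words=True)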
import inspect
import unittest
from datasets import load_dataset
from packaging import version
from transformers import BeitConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_MAPPING,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
)
from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
import PIL
from PIL import Image
from transformers import BeitImageProcessor
class __lowerCamelCase :
"""simple docstring"""
def __init__( self , UpperCAmelCase , UpperCAmelCase=100 , UpperCAmelCase=13 , UpperCAmelCase=30 , UpperCAmelCase=2 , UpperCAmelCase=3 , UpperCAmelCase=True , UpperCAmelCase=True , UpperCAmelCase=32 , UpperCAmelCase=4 , UpperCAmelCase=4 , UpperCAmelCase=37 , UpperCAmelCase="gelu" , UpperCAmelCase=0.1 , UpperCAmelCase=0.1 , UpperCAmelCase=10 , UpperCAmelCase=0.02 , UpperCAmelCase=3 , UpperCAmelCase=None , UpperCAmelCase=[0, 1, 2, 3] , ) -> Any:
'''simple docstring'''
lowercase_ = parent
lowercase_ = 100
lowercase_ = batch_size
lowercase_ = image_size
lowercase_ = patch_size
lowercase_ = num_channels
lowercase_ = is_training
lowercase_ = use_labels
lowercase_ = hidden_size
lowercase_ = num_hidden_layers
lowercase_ = num_attention_heads
lowercase_ = intermediate_size
lowercase_ = hidden_act
lowercase_ = hidden_dropout_prob
lowercase_ = attention_probs_dropout_prob
lowercase_ = type_sequence_label_size
lowercase_ = initializer_range
lowercase_ = scope
lowercase_ = out_indices
lowercase_ = num_labels
# in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
lowercase_ = (image_size // patch_size) ** 2
lowercase_ = num_patches + 1
def A__ ( self ) -> Tuple:
'''simple docstring'''
lowercase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase_ = None
lowercase_ = None
if self.use_labels:
lowercase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase_ = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
lowercase_ = self.get_config()
return config, pixel_values, labels, pixel_labels
def A__ ( self ) -> Union[str, Any]:
'''simple docstring'''
return BeitConfig(
vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCAmelCase , initializer_range=self.initializer_range , out_indices=self.out_indices , )
def A__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Any:
'''simple docstring'''
lowercase_ = BeitModel(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
lowercase_ = model(UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def A__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> str:
'''simple docstring'''
lowercase_ = BeitForMaskedImageModeling(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
lowercase_ = model(UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) )
def A__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
lowercase_ = self.type_sequence_label_size
lowercase_ = BeitForImageClassification(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
lowercase_ = model(UpperCAmelCase , labels=UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
lowercase_ = 1
lowercase_ = BeitForImageClassification(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
lowercase_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowercase_ = model(UpperCAmelCase , labels=UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def A__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> List[Any]:
'''simple docstring'''
lowercase_ = self.num_labels
lowercase_ = BeitForSemanticSegmentation(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
lowercase_ = model(UpperCAmelCase )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
lowercase_ = model(UpperCAmelCase , labels=UpperCAmelCase )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
def A__ ( self ) -> Optional[int]:
'''simple docstring'''
lowercase_ = self.prepare_config_and_inputs()
lowercase_ , lowercase_ , lowercase_ , lowercase_ = config_and_inputs
lowercase_ = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class __lowerCamelCase ( snake_case_ , snake_case_ , unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase__ = (
(BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation)
if is_torch_available()
else ()
)
lowerCAmelCase__ = (
{
"feature-extraction": BeitModel,
"image-classification": BeitForImageClassification,
"image-segmentation": BeitForSemanticSegmentation,
}
if is_torch_available()
else {}
)
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
    def setUp(self ):
        '''simple docstring'''
        self.model_tester = BeitModelTester(self )
        self.config_tester = ConfigTester(self , config_class=BeitConfig , has_text_modality=False , hidden_size=37 )
def A__ ( self ) -> Union[str, Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="BEiT does not use inputs_embeds" )
def A__ ( self ) -> Optional[Any]:
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(reason="BEiT has some layers using `add_module` which doesn't work well with `nn.DataParallel`" )
def A__ ( self ) -> Tuple:
'''simple docstring'''
pass
def A__ ( self ) -> List[Any]:
'''simple docstring'''
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase_ = model_class(UpperCAmelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowercase_ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCAmelCase , nn.Linear ) )
def A__ ( self ) -> Tuple:
'''simple docstring'''
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase_ = model_class(UpperCAmelCase )
lowercase_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase_ = [*signature.parameters.keys()]
lowercase_ = ["pixel_values"]
self.assertListEqual(arg_names[:1] , UpperCAmelCase )
def A__ ( self ) -> Any:
'''simple docstring'''
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase )
def A__ ( self ) -> List[str]:
'''simple docstring'''
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*UpperCAmelCase )
def A__ ( self ) -> Tuple:
'''simple docstring'''
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCAmelCase )
def A__ ( self ) -> Union[str, Any]:
'''simple docstring'''
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*UpperCAmelCase )
def A__ ( self ) -> List[Any]:
'''simple docstring'''
if not self.model_tester.is_training:
return
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
lowercase_ = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if model_class in [*get_values(UpperCAmelCase ), BeitForMaskedImageModeling]:
continue
lowercase_ = model_class(UpperCAmelCase )
model.to(UpperCAmelCase )
model.train()
lowercase_ = self._prepare_for_class(UpperCAmelCase , UpperCAmelCase , return_labels=UpperCAmelCase )
lowercase_ = model(**UpperCAmelCase ).loss
loss.backward()
def A__ ( self ) -> List[str]:
'''simple docstring'''
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
lowercase_ = False
lowercase_ = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if (
model_class in [*get_values(UpperCAmelCase ), BeitForMaskedImageModeling]
or not model_class.supports_gradient_checkpointing
):
continue
lowercase_ = model_class(UpperCAmelCase )
model.gradient_checkpointing_enable()
model.to(UpperCAmelCase )
model.train()
lowercase_ = self._prepare_for_class(UpperCAmelCase , UpperCAmelCase , return_labels=UpperCAmelCase )
lowercase_ = model(**UpperCAmelCase ).loss
loss.backward()
def A__ ( self ) -> List[str]:
'''simple docstring'''
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
lowercase_ = _config_zero_init(UpperCAmelCase )
for model_class in self.all_model_classes:
lowercase_ = model_class(config=UpperCAmelCase )
for name, param in model.named_parameters():
# we skip lambda parameters as these require special initial values
# determined by config.layer_scale_init_value
if "lambda" in name:
continue
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F'Parameter {name} of model {model_class} seems not properly initialized' , )
@slow
def A__ ( self ) -> List[str]:
'''simple docstring'''
for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase_ = BeitModel.from_pretrained(UpperCAmelCase )
self.assertIsNotNone(UpperCAmelCase )
def SCREAMING_SNAKE_CASE_ ( ):
'''simple docstring'''
lowercase_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class __lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def A__ ( self ) -> str:
'''simple docstring'''
return BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224" ) if is_vision_available() else None
@slow
def A__ ( self ) -> Optional[int]:
'''simple docstring'''
lowercase_ = BeitForMaskedImageModeling.from_pretrained("microsoft/beit-base-patch16-224-pt22k" ).to(UpperCAmelCase )
lowercase_ = self.default_image_processor
lowercase_ = prepare_img()
lowercase_ = image_processor(images=UpperCAmelCase , return_tensors="pt" ).pixel_values.to(UpperCAmelCase )
# prepare bool_masked_pos
lowercase_ = torch.ones((1, 196) , dtype=torch.bool ).to(UpperCAmelCase )
# forward pass
with torch.no_grad():
lowercase_ = model(pixel_values=UpperCAmelCase , bool_masked_pos=UpperCAmelCase )
lowercase_ = outputs.logits
# verify the logits
lowercase_ = torch.Size((1, 196, 8192) )
self.assertEqual(logits.shape , UpperCAmelCase )
lowercase_ = torch.tensor(
[[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]] ).to(UpperCAmelCase )
self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3] , UpperCAmelCase , atol=1e-2 ) )
@slow
def A__ ( self ) -> Dict:
'''simple docstring'''
lowercase_ = BeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224" ).to(UpperCAmelCase )
lowercase_ = self.default_image_processor
lowercase_ = prepare_img()
lowercase_ = image_processor(images=UpperCAmelCase , return_tensors="pt" ).to(UpperCAmelCase )
# forward pass
with torch.no_grad():
lowercase_ = model(**UpperCAmelCase )
lowercase_ = outputs.logits
# verify the logits
lowercase_ = torch.Size((1, 1000) )
self.assertEqual(logits.shape , UpperCAmelCase )
lowercase_ = torch.tensor([-1.2385, -1.0987, -1.0108] ).to(UpperCAmelCase )
self.assertTrue(torch.allclose(logits[0, :3] , UpperCAmelCase , atol=1e-4 ) )
lowercase_ = 281
self.assertEqual(logits.argmax(-1 ).item() , UpperCAmelCase )
@slow
def A__ ( self ) -> Tuple:
'''simple docstring'''
lowercase_ = BeitForImageClassification.from_pretrained("microsoft/beit-large-patch16-224-pt22k-ft22k" ).to(
UpperCAmelCase )
lowercase_ = self.default_image_processor
lowercase_ = prepare_img()
lowercase_ = image_processor(images=UpperCAmelCase , return_tensors="pt" ).to(UpperCAmelCase )
# forward pass
with torch.no_grad():
lowercase_ = model(**UpperCAmelCase )
lowercase_ = outputs.logits
# verify the logits
lowercase_ = torch.Size((1, 21841) )
self.assertEqual(logits.shape , UpperCAmelCase )
lowercase_ = torch.tensor([1.6881, -0.2787, 0.5901] ).to(UpperCAmelCase )
self.assertTrue(torch.allclose(logits[0, :3] , UpperCAmelCase , atol=1e-4 ) )
lowercase_ = 2396
self.assertEqual(logits.argmax(-1 ).item() , UpperCAmelCase )
@slow
def A__ ( self ) -> str:
'''simple docstring'''
lowercase_ = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640" )
lowercase_ = model.to(UpperCAmelCase )
lowercase_ = BeitImageProcessor(do_resize=UpperCAmelCase , size=640 , do_center_crop=UpperCAmelCase )
lowercase_ = load_dataset("hf-internal-testing/fixtures_ade20k" , split="test" )
lowercase_ = Image.open(ds[0]["file"] )
lowercase_ = image_processor(images=UpperCAmelCase , return_tensors="pt" ).to(UpperCAmelCase )
# forward pass
with torch.no_grad():
lowercase_ = model(**UpperCAmelCase )
lowercase_ = outputs.logits
# verify the logits
lowercase_ = torch.Size((1, 150, 160, 160) )
self.assertEqual(logits.shape , UpperCAmelCase )
        is_pillow_less_than_a = version.parse(PIL.__version__ ) < version.parse("9.0.0" )
if is_pillow_less_than_a:
lowercase_ = torch.tensor(
[
[[-4.9225, -2.3954, -3.0522], [-2.8822, -1.0046, -1.7561], [-2.9549, -1.3228, -2.1347]],
[[-5.8168, -3.4129, -4.0778], [-3.8651, -2.2214, -3.0277], [-3.8356, -2.4643, -3.3535]],
[[-0.0078, 3.9952, 4.0754], [2.9856, 4.6944, 5.0035], [3.2413, 4.7813, 4.9969]],
] , device=UpperCAmelCase , )
else:
lowercase_ = torch.tensor(
[
[[-4.8960, -2.3688, -3.0355], [-2.8478, -0.9836, -1.7418], [-2.9449, -1.3332, -2.1456]],
[[-5.8081, -3.4124, -4.1006], [-3.8561, -2.2081, -3.0323], [-3.8365, -2.4601, -3.3669]],
[[-0.0309, 3.9868, 4.0540], [2.9640, 4.6877, 4.9976], [3.2081, 4.7690, 4.9942]],
] , device=UpperCAmelCase , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , UpperCAmelCase , atol=1e-4 ) )
@slow
def A__ ( self ) -> Tuple:
'''simple docstring'''
lowercase_ = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640" )
lowercase_ = model.to(UpperCAmelCase )
lowercase_ = BeitImageProcessor(do_resize=UpperCAmelCase , size=640 , do_center_crop=UpperCAmelCase )
lowercase_ = load_dataset("hf-internal-testing/fixtures_ade20k" , split="test" )
lowercase_ = Image.open(ds[0]["file"] )
lowercase_ = image_processor(images=UpperCAmelCase , return_tensors="pt" ).to(UpperCAmelCase )
# forward pass
with torch.no_grad():
lowercase_ = model(**UpperCAmelCase )
lowercase_ = outputs.logits.detach().cpu()
lowercase_ = image_processor.post_process_semantic_segmentation(outputs=UpperCAmelCase , target_sizes=[(500, 300)] )
lowercase_ = torch.Size((500, 300) )
self.assertEqual(segmentation[0].shape , UpperCAmelCase )
lowercase_ = image_processor.post_process_semantic_segmentation(outputs=UpperCAmelCase )
lowercase_ = torch.Size((160, 160) )
self.assertEqual(segmentation[0].shape , UpperCAmelCase )
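        # Note (added): post_process_semantic_segmentation returns one per-pixel label
        # map per image. With target_sizes the logits are resized to each requested
        # (height, width) before the argmax; without it the maps keep the model's
        # logit resolution, 160x160 for this 640x640 checkpoint, as asserted above.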
import argparse
import collections
import numpy as np
import torch
from flax import traverse_util
from tax import checkpoints
from transformers import MTaConfig, UMTaEncoderModel, UMTaForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def tax_relpos_bias_lookup(params , i , prefix ):
    """Returns the relative position bias parameters of a layer; does not transpose."""
    return params[F'{prefix}/{prefix}/relpos_bias/rel_embedding'][:, i, :]
def tax_attention_lookup(params , i , prefix , layer_name="attention" ):
    """Returns the K, O, Q, V parameters of (self-)attention of layer i; does not transpose."""
    k_tmp = np.ascontiguousarray(params[F'{prefix}/{prefix}/{layer_name}/key/kernel'][:, i, :, :] )
    k = k_tmp.reshape(k_tmp.shape[0] , k_tmp.shape[1] * k_tmp.shape[2] )
    o_tmp = np.ascontiguousarray(params[F'{prefix}/{prefix}/{layer_name}/out/kernel'][:, i, :, :] )
    o = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1] , o_tmp.shape[2] )
    q_tmp = np.ascontiguousarray(params[F'{prefix}/{prefix}/{layer_name}/query/kernel'][:, i, :, :] )
    q = q_tmp.reshape(q_tmp.shape[0] , q_tmp.shape[1] * q_tmp.shape[2] )
    v_tmp = np.ascontiguousarray(params[F'{prefix}/{prefix}/{layer_name}/value/kernel'][:, i, :, :] )
    v = v_tmp.reshape(v_tmp.shape[0] , v_tmp.shape[1] * v_tmp.shape[2] )
    return k, o, q, v
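# Shape note (added): T5X stacks all layers of a module into a single array, so an
# attention kernel has shape (d_model, num_layers, num_heads, head_dim); indexing
# [:, i, :, :] selects layer i, and the reshape folds heads and head_dim into one
# 2-D matrix that can later be transposed into a torch Linear weight.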
def tax_mlp_lookup(params , i , prefix , split_mlp_wi=False ):
    """Returns the MLP parameters of layer i; does not transpose."""
    if split_mlp_wi:
        wi_0 = params[F'{prefix}/{prefix}/mlp/wi_0/kernel'][:, i, :]
        wi_1 = params[F'{prefix}/{prefix}/mlp/wi_1/kernel'][:, i, :]
        wi = (wi_0, wi_1)
    else:
        wi = params[F'{prefix}/{prefix}/mlp/wi/kernel'][:, i, :]
    wo = params[F'{prefix}/{prefix}/mlp/wo/kernel'][:, i, :]
    return wi, wo
def tax_layer_norm_lookup(params , i , prefix , layer_name ):
    """Returns the layer norm scale of layer i."""
    return params[F'{prefix}/{prefix}/{layer_name}/scale'][:, i]
def convert_tax_to_pytorch(variables: dict , *, num_layers: int , is_encoder_only: bool , scalable_attention: bool = False ):
    """Converts the parameters from a T5X-Flax checkpoint to the PyTorch naming scheme."""
    old = traverse_util.flatten_dict(variables["target"] )
    old = {"/".join(k ): v for k, v in old.items()}
    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    split_mlp_wi = "encoder/encoder/mlp/wi_0/kernel" in old
    print("Split MLP:" , split_mlp_wi )
    new = collections.OrderedDict()
    # Shared embeddings.
    new["shared.weight"] = old["token_embedder/embedding"]
    # Encoder.
    for i in range(num_layers ):
        # Block i, layer 0 (Self Attention).
        layer_norm = tax_layer_norm_lookup(old , i , "encoder" , "pre_attention_layer_norm" )
        k , o , q , v = tax_attention_lookup(old , i , "encoder" , "attention" )
        new[F'encoder.block.{i}.layer.0.layer_norm.weight'] = layer_norm
        new[F'encoder.block.{i}.layer.0.SelfAttention.k.weight'] = k.T
        new[F'encoder.block.{i}.layer.0.SelfAttention.o.weight'] = o.T
        new[F'encoder.block.{i}.layer.0.SelfAttention.q.weight'] = q.T
        new[F'encoder.block.{i}.layer.0.SelfAttention.v.weight'] = v.T
        # Block i, layer 1 (MLP).
        layer_norm = tax_layer_norm_lookup(old , i , "encoder" , "pre_mlp_layer_norm" )
        wi , wo = tax_mlp_lookup(old , i , "encoder" , split_mlp_wi )
        new[F'encoder.block.{i}.layer.1.layer_norm.weight'] = layer_norm
        if split_mlp_wi:
            new[F'encoder.block.{i}.layer.1.DenseReluDense.wi_0.weight'] = wi[0].T
            new[F'encoder.block.{i}.layer.1.DenseReluDense.wi_1.weight'] = wi[1].T
        else:
            new[F'encoder.block.{i}.layer.1.DenseReluDense.wi.weight'] = wi.T
        new[F'encoder.block.{i}.layer.1.DenseReluDense.wo.weight'] = wo.T
        if scalable_attention:
            # convert the rel_embedding of each layer
            new[F'encoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight'] = tax_relpos_bias_lookup(
                old , i , "encoder" ).T
    new["encoder.final_layer_norm.weight"] = old["encoder/encoder_norm/scale"]
    if not scalable_attention:
        new["encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = tax_relpos_bias_lookup(
            old , 0 , "encoder" ).T
        new["decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = tax_relpos_bias_lookup(
            old , 0 , "decoder" ).T
    if not is_encoder_only:
        # Decoder.
        for i in range(num_layers ):
            # Block i, layer 0 (Self Attention).
            layer_norm = tax_layer_norm_lookup(old , i , "decoder" , "pre_self_attention_layer_norm" )
            k , o , q , v = tax_attention_lookup(old , i , "decoder" , "self_attention" )
            new[F'decoder.block.{i}.layer.0.layer_norm.weight'] = layer_norm
            new[F'decoder.block.{i}.layer.0.SelfAttention.k.weight'] = k.T
            new[F'decoder.block.{i}.layer.0.SelfAttention.o.weight'] = o.T
            new[F'decoder.block.{i}.layer.0.SelfAttention.q.weight'] = q.T
            new[F'decoder.block.{i}.layer.0.SelfAttention.v.weight'] = v.T
            # Block i, layer 1 (Cross Attention).
            layer_norm = tax_layer_norm_lookup(old , i , "decoder" , "pre_cross_attention_layer_norm" )
            k , o , q , v = tax_attention_lookup(old , i , "decoder" , "encoder_decoder_attention" )
            new[F'decoder.block.{i}.layer.1.layer_norm.weight'] = layer_norm
            new[F'decoder.block.{i}.layer.1.EncDecAttention.k.weight'] = k.T
            new[F'decoder.block.{i}.layer.1.EncDecAttention.o.weight'] = o.T
            new[F'decoder.block.{i}.layer.1.EncDecAttention.q.weight'] = q.T
            new[F'decoder.block.{i}.layer.1.EncDecAttention.v.weight'] = v.T
            # Block i, layer 2 (MLP).
            layer_norm = tax_layer_norm_lookup(old , i , "decoder" , "pre_mlp_layer_norm" )
            wi , wo = tax_mlp_lookup(old , i , "decoder" , split_mlp_wi )
            new[F'decoder.block.{i}.layer.2.layer_norm.weight'] = layer_norm
            if split_mlp_wi:
                new[F'decoder.block.{i}.layer.2.DenseReluDense.wi_0.weight'] = wi[0].T
                new[F'decoder.block.{i}.layer.2.DenseReluDense.wi_1.weight'] = wi[1].T
            else:
                new[F'decoder.block.{i}.layer.2.DenseReluDense.wi.weight'] = wi.T
            new[F'decoder.block.{i}.layer.2.DenseReluDense.wo.weight'] = wo.T
            if scalable_attention:
                # convert the rel_embedding of each layer
                new[F'decoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight'] = tax_relpos_bias_lookup(old , i , "decoder" ).T
        new["decoder.final_layer_norm.weight"] = old["decoder/decoder_norm/scale"]
        # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
        if "decoder/logits_dense/kernel" in old:
            new["lm_head.weight"] = old["decoder/logits_dense/kernel"].T
    return new
def make_state_dict(converted_params , is_encoder_only: bool ):
    """Prepares a state dict for the PyTorch model."""
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] )
    # Add what is missing.
    if "encoder.embed_tokens.weight" not in state_dict:
        state_dict["encoder.embed_tokens.weight"] = state_dict["shared.weight"]
    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            state_dict["decoder.embed_tokens.weight"] = state_dict["shared.weight"]
        if "lm_head.weight" not in state_dict:  # For old 1.0 models.
            print("Using shared word embeddings as lm_head." )
            state_dict["lm_head.weight"] = state_dict["shared.weight"]
    return state_dict
def load_tax_weights_in_ta(model , config , tax_checkpoint_path , is_encoder_only , scalable_attention ):
    """Replaces the params in the model with the converted T5X params."""
    variables = checkpoints.load_tax_checkpoint(tax_checkpoint_path )
    converted = convert_tax_to_pytorch(
        variables , num_layers=config.num_layers , is_encoder_only=is_encoder_only , scalable_attention=scalable_attention )
    state_dict = make_state_dict(converted , is_encoder_only )
    model.load_state_dict(state_dict , strict=True )
def convert_tax_checkpoint_to_pytorch(
    tax_checkpoint_path , config_file , pytorch_dump_path , is_encoder_only: bool = False , scalable_attention: bool = False , ):
    """Loads the config and model, converts the T5X checkpoint, and saves a PyTorch checkpoint."""
    config = MTaConfig.from_json_file(config_file )
    print(F'Building PyTorch model from configuration: {config}' )
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = UMTaEncoderModel(config )
    else:
        model = UMTaForConditionalGeneration(config )
    # Load weights from tf checkpoint
    load_tax_weights_in_ta(model , config , tax_checkpoint_path , is_encoder_only , scalable_attention )
    # Save pytorch-model
    print(F'Save PyTorch model to {pytorch_dump_path}' )
    model.save_pretrained(pytorch_dump_path )
    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path )
    print("Done" )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser(description="""Converts a native T5X checkpoint into a PyTorch checkpoint.""")
# Required parameters
parser.add_argument(
"""--t5x_checkpoint_path""", default=None, type=str, required=True, help="""Path to the T5X checkpoint."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help="""The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.""",
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--is_encoder_only""", action="""store_true""", help="""Check if the model is encoder-decoder model""", default=False
)
parser.add_argument(
"""--scalable_attention""",
action="""store_true""",
help="""Whether the model uses scaled attention (umt5 model)""",
default=False,
)
SCREAMING_SNAKE_CASE__ = parser.parse_args()
convert_tax_checkpoint_to_pytorch(
        args.t5x_checkpoint_path,
args.config_file,
args.pytorch_dump_path,
args.is_encoder_only,
args.scalable_attention,
)
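# Example invocation (added; the script name and paths are placeholders):
#   python convert_t5x_checkpoint_to_pytorch.py \
#       --t5x_checkpoint_path /path/to/t5x/checkpoint \
#       --config_file /path/to/config.json \
#       --pytorch_dump_path ./umt5-converted \
#       --scalable_attention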
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import importlib.metadata
import json
import os
from dataclasses import dataclass
from typing import Any, Dict, Union
from packaging import version
from ..utils import is_torch_available, logging
if is_torch_available():
import torch
_lowerCAmelCase : List[str] = logging.get_logger(__name__)
@dataclass
class BitsAndBytesConfig:
    def __init__(
        self,
        load_in_8bit=False,
        load_in_4bit=False,
        llm_int8_threshold=6.0,
        llm_int8_skip_modules=None,
        llm_int8_enable_fp32_cpu_offload=False,
        llm_int8_has_fp16_weight=False,
        bnb_4bit_compute_dtype=None,
        bnb_4bit_quant_type="fp4",
        bnb_4bit_use_double_quant=False,
        **kwargs,
    ):
        self.load_in_8bit = load_in_8bit
        self.load_in_4bit = load_in_4bit
        self.llm_int8_threshold = llm_int8_threshold
        self.llm_int8_skip_modules = llm_int8_skip_modules
        self.llm_int8_enable_fp32_cpu_offload = llm_int8_enable_fp32_cpu_offload
        self.llm_int8_has_fp16_weight = llm_int8_has_fp16_weight
        self.bnb_4bit_quant_type = bnb_4bit_quant_type
        self.bnb_4bit_use_double_quant = bnb_4bit_use_double_quant

        if bnb_4bit_compute_dtype is None:
            self.bnb_4bit_compute_dtype = torch.float32
        elif isinstance(bnb_4bit_compute_dtype, str):
            self.bnb_4bit_compute_dtype = getattr(torch, bnb_4bit_compute_dtype)
        elif isinstance(bnb_4bit_compute_dtype, torch.dtype):
            self.bnb_4bit_compute_dtype = bnb_4bit_compute_dtype
        else:
            raise ValueError("bnb_4bit_compute_dtype must be a string or a torch.dtype")

        self.post_init()

    def post_init(self):
        r"""Safety checker that the arguments are correct."""
        if not isinstance(self.llm_int8_threshold, float):
            raise ValueError("llm_int8_threshold must be a float")
        if self.llm_int8_skip_modules is not None and not isinstance(self.llm_int8_skip_modules, list):
            raise ValueError("llm_int8_skip_modules must be a list of strings")
        if not isinstance(self.llm_int8_enable_fp32_cpu_offload, bool):
            raise ValueError("llm_int8_enable_fp32_cpu_offload must be a boolean")
        if not isinstance(self.llm_int8_has_fp16_weight, bool):
            raise ValueError("llm_int8_has_fp16_weight must be a boolean")
        if self.bnb_4bit_compute_dtype is not None and not isinstance(self.bnb_4bit_compute_dtype, torch.dtype):
            raise ValueError("bnb_4bit_compute_dtype must be torch.dtype")
        if not isinstance(self.bnb_4bit_quant_type, str):
            raise ValueError("bnb_4bit_quant_type must be a string")
        if not isinstance(self.bnb_4bit_use_double_quant, bool):
            raise ValueError("bnb_4bit_use_double_quant must be a boolean")
        if self.load_in_4bit and not version.parse(importlib.metadata.version("bitsandbytes")) >= version.parse(
            "0.39.0"
        ):
            raise ValueError(
                "4 bit quantization requires bitsandbytes>=0.39.0 - please upgrade your bitsandbytes version")

    def is_quantizable(self):
        r"""Returns `True` if the model is quantizable, `False` otherwise."""
        return self.load_in_8bit or self.load_in_4bit

    def quantization_method(self):
        r"""Returns the quantization method used in the model."""
        if self.load_in_8bit:
            return "llm_int8"
        elif self.load_in_4bit and self.bnb_4bit_quant_type == "fp4":
            return "fp4"
        elif self.load_in_4bit and self.bnb_4bit_quant_type == "nf4":
            return "nf4"
        else:
            return None

    @classmethod
    def from_dict(cls, config_dict, return_unused_kwargs, **kwargs):
        config = cls(**config_dict)
        to_remove = []
        for key, value in kwargs.items():
            if hasattr(config, key):
                setattr(config, key, value)
                to_remove.append(key)
        for key in to_remove:
            kwargs.pop(key, None)
        if return_unused_kwargs:
            return config, kwargs
        else:
            return config

    def to_json_file(self, json_file_path: Union[str, os.PathLike]):
        with open(json_file_path, "w", encoding="utf-8") as writer:
            config_dict = self.to_dict()
            json_string = json.dumps(config_dict, indent=2, sort_keys=True) + "\n"
            writer.write(json_string)

    def to_dict(self) -> Dict[str, Any]:
        output = copy.deepcopy(self.__dict__)
        output["bnb_4bit_compute_dtype"] = str(output["bnb_4bit_compute_dtype"]).split(".")[1]
        return output

    def __repr__(self):
        return f"{self.__class__.__name__} {self.to_json_string()}"

    def to_json_string(self, use_diff: bool = True) -> str:
        if use_diff is True:
            config_dict = self.to_diff_dict()
        else:
            config_dict = self.to_dict()
        return json.dumps(config_dict, indent=2, sort_keys=True) + "\n"

    def to_diff_dict(self) -> Dict[str, Any]:
        config_dict = self.to_dict()
        # get the default config dict
        default_config_dict = BitsAndBytesConfig().to_dict()
        serializable_config_dict = {}
        # only serialize values that differ from the default config
        for key, value in config_dict.items():
            if value != default_config_dict[key]:
                serializable_config_dict[key] = value
        return serializable_config_dict
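# Minimal usage sketch (values are illustrative; actually loading a quantized model
# also requires bitsandbytes and a CUDA-enabled torch install, not shown here):
#
#   quantization_config = BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_quant_type="nf4")
#   print(quantization_config.to_json_string())  # with use_diff=True, only non-default fields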
| 298
|
"""simple docstring"""
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def log_results(result: Dataset, args: Dict[str, str]):
    """Computes WER/CER for the decoded dataset and writes the results to disk."""
    log_outputs = args.log_outputs
    dataset_id = "_".join(args.dataset.split("/") + [args.config, args.split])
    # load metric
    wer = load_metric("wer")
    cer = load_metric("cer")
    # compute metrics
    wer_result = wer.compute(references=result["target"], predictions=result["prediction"])
    cer_result = cer.compute(references=result["target"], predictions=result["prediction"])
    # print & log results
    result_str = f"WER: {wer_result}\nCER: {cer_result}"
    print(result_str)
    with open(f"{dataset_id}_eval_results.txt", "w") as f:
        f.write(result_str)
    # log all results in text file. Possibly interesting for analysis
    if log_outputs is not None:
        pred_file = f"log_{dataset_id}_predictions.txt"
        target_file = f"log_{dataset_id}_targets.txt"
        with open(pred_file, "w") as p, open(target_file, "w") as t:
            # mapping function to write output
            def write_to_file(batch, i):
                p.write(f"{i}" + "\n")
                p.write(batch["prediction"] + "\n")
                t.write(f"{i}" + "\n")
                t.write(batch["target"] + "\n")

            result.map(write_to_file, with_indices=True)
def normalize_text(text: str) -> str:
    """Lowercases the text and strips the characters/sequences ignored during training."""
    chars_to_ignore_regex = '[,?.!\-\;\:"“%‘”�—’…–]'  # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
    text = re.sub(chars_to_ignore_regex, "", text.lower())
    # In addition, we can normalize the target text, e.g. removing new lines characters etc...
    # note that order is important here!
    token_sequences_to_ignore = ["\n\n", "\n", "   ", "  "]
    for t in token_sequences_to_ignore:
        text = " ".join(text.split(t))
    return text
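# For illustration: normalize_text("Hello, World!") returns "hello world"; the
# punctuation listed in the regex is stripped and the text is lowercased.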
def main(args):
    # load dataset
    dataset = load_dataset(args.dataset, args.config, split=args.split, use_auth_token=True)
    # for testing: only process the first two examples as a test
    # dataset = dataset.select(range(10))
    # load processor
    feature_extractor = AutoFeatureExtractor.from_pretrained(args.model_id)
    sampling_rate = feature_extractor.sampling_rate
    # resample audio
    dataset = dataset.cast_column("audio", Audio(sampling_rate=sampling_rate))
    # load eval pipeline
    if args.device is None:
        args.device = 0 if torch.cuda.is_available() else -1
    asr = pipeline("automatic-speech-recognition", model=args.model_id, device=args.device)
    # map function to decode audio
    def map_to_pred(batch):
        prediction = asr(
            batch["audio"]["array"], chunk_length_s=args.chunk_length_s, stride_length_s=args.stride_length_s
        )
        batch["prediction"] = prediction["text"]
        batch["target"] = normalize_text(batch["sentence"])
        return batch

    # run inference on all examples
    result = dataset.map(map_to_pred, remove_columns=dataset.column_names)
    # compute and log_results
    # do not change function below
    log_results(result, args)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--model_id""", type=str, required=True, help="""Model identifier. Should be loadable with 🤗 Transformers"""
)
parser.add_argument(
"""--dataset""",
type=str,
required=True,
help="""Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets""",
)
parser.add_argument(
"""--config""", type=str, required=True, help="""Config of the dataset. *E.g.* `'en'` for Common Voice"""
)
parser.add_argument("""--split""", type=str, required=True, help="""Split of the dataset. *E.g.* `'test'`""")
parser.add_argument(
"""--chunk_length_s""", type=float, default=None, help="""Chunk length in seconds. Defaults to 5 seconds."""
)
parser.add_argument(
"""--stride_length_s""", type=float, default=None, help="""Stride of the audio chunks. Defaults to 1 second."""
)
parser.add_argument(
"""--log_outputs""", action="""store_true""", help="""If defined, write outputs to log file for analysis."""
)
parser.add_argument(
"""--device""",
type=int,
default=None,
help="""The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.""",
)
    args = parser.parse_args()
main(args)
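# Example invocation (model and dataset identifiers are illustrative):
#   python eval.py --model_id facebook/wav2vec2-base-960h \
#       --dataset mozilla-foundation/common_voice_7_0 --config en --split test --log_outputs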
| 298
| 1
|
"""simple docstring"""
from typing import List
import jiwer
import jiwer.transforms as tr
from packaging import version
import datasets
from datasets.config import PY_VERSION
if PY_VERSION < version.parse("""3.8"""):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
_lowerCAmelCase : int = """"""
if version.parse(importlib_metadata.version("""jiwer""")) < version.parse("""2.3.0"""):
    class SentencesToListOfCharacters(tr.AbstractTransform):
        def __init__(self, sentence_delimiter: str = " "):
            self.sentence_delimiter = sentence_delimiter

        def process_string(self, s: str):
            return list(s)

        def process_list(self, inp: List[str]):
            chars = []
            for sent_idx, sentence in enumerate(inp):
                chars.extend(self.process_string(sentence))
                if self.sentence_delimiter is not None and self.sentence_delimiter != "" and sent_idx < len(inp) - 1:
                    chars.append(self.sentence_delimiter)
            return chars
    cer_transform = tr.Compose(
        [tr.RemoveMultipleSpaces(), tr.Strip(), SentencesToListOfCharacters(SENTENCE_DELIMITER)]
    )
else:
    cer_transform = tr.Compose(
        [
            tr.RemoveMultipleSpaces(),
            tr.Strip(),
            tr.ReduceToSingleSentence(SENTENCE_DELIMITER),
            tr.ReduceToListOfListOfChars(),
        ]
    )
_lowerCAmelCase : Dict = """\
@inproceedings{inproceedings,
author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
year = {2004},
month = {01},
pages = {},
title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
"""
_lowerCAmelCase : Optional[int] = """\
Character error rate (CER) is a common metric of the performance of an automatic speech recognition system.
CER is similar to Word Error Rate (WER), but operates on character instead of word. Please refer to docs of WER for further information.
Character error rate can be computed as:
CER = (S + D + I) / N = (S + D + I) / (S + D + C)
where
S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct characters,
N is the number of characters in the reference (N=S+D+C).
CER's output is not always a number between 0 and 1, in particular when there is a high number of insertions. This value is often associated to the percentage of characters that were incorrectly predicted. The lower the value, the better the
performance of the ASR system with a CER of 0 being a perfect score.
"""
_lowerCAmelCase : Any = """
Computes CER score of transcribed segments against references.
Args:
references: list of references for each speech input.
predictions: list of transcribtions to score.
concatenate_texts: Whether or not to concatenate sentences before evaluation, set to True for more accurate result.
Returns:
(float): the character error rate
Examples:
>>> predictions = [\"this is the prediction\", \"there is an other sample\"]
>>> references = [\"this is the reference\", \"there is another one\"]
>>> cer = datasets.load_metric(\"cer\")
>>> cer_score = cer.compute(predictions=predictions, references=references)
>>> print(cer_score)
0.34146341463414637
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class CER(datasets.Metric):
    def _info(self):
        """Returns the metric metadata (features, citation, reference URLs)."""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Value("string" , id="sequence" ),
} ) , codebase_urls=["https://github.com/jitsi/jiwer/"] , reference_urls=[
"https://en.wikipedia.org/wiki/Word_error_rate",
"https://sites.google.com/site/textdigitisation/qualitymeasures/computingerrorrates",
] , )
    def _compute(self, predictions, references, concatenate_texts=False):
        """Computes CER, optionally concatenating all texts before scoring."""
        if concatenate_texts:
            return jiwer.compute_measures(
                references,
                predictions,
                truth_transform=cer_transform,
                hypothesis_transform=cer_transform,
            )["wer"]
        incorrect = 0
        total = 0
        for prediction, reference in zip(predictions, references):
            measures = jiwer.compute_measures(
                reference, prediction, truth_transform=cer_transform, hypothesis_transform=cer_transform
            )
            incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
            total += measures["substitutions"] + measures["deletions"] + measures["hits"]
        return incorrect / total
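# Worked example of the formula above: for reference "hello" and prediction "helo",
# S=0, D=1, I=0 over N=5 reference characters, so CER = (0 + 1 + 0) / 5 = 0.2.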
| 298
|
"""simple docstring"""
import inspect
import unittest
from datasets import load_dataset
from packaging import version
from transformers import BeitConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_MAPPING,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
)
from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
import PIL
from PIL import Image
from transformers import BeitImageProcessor
class BeitModelTester:
def __init__( self : Any , snake_case__ : Union[str, Any] , snake_case__ : str=1_0_0 , snake_case__ : str=1_3 , snake_case__ : Optional[int]=3_0 , snake_case__ : List[Any]=2 , snake_case__ : Any=3 , snake_case__ : Union[str, Any]=True , snake_case__ : List[Any]=True , snake_case__ : Any=3_2 , snake_case__ : List[str]=4 , snake_case__ : Any=4 , snake_case__ : Dict=3_7 , snake_case__ : str="gelu" , snake_case__ : Union[str, Any]=0.1 , snake_case__ : int=0.1 , snake_case__ : List[Any]=1_0 , snake_case__ : Any=0.02 , snake_case__ : List[str]=3 , snake_case__ : Tuple=None , snake_case__ : Tuple=[0, 1, 2, 3] , ):
'''simple docstring'''
UpperCAmelCase__ : int = parent
UpperCAmelCase__ : List[str] = 1_0_0
UpperCAmelCase__ : List[Any] = batch_size
UpperCAmelCase__ : int = image_size
UpperCAmelCase__ : List[Any] = patch_size
UpperCAmelCase__ : List[Any] = num_channels
UpperCAmelCase__ : Any = is_training
UpperCAmelCase__ : str = use_labels
UpperCAmelCase__ : Any = hidden_size
UpperCAmelCase__ : Dict = num_hidden_layers
UpperCAmelCase__ : int = num_attention_heads
UpperCAmelCase__ : Tuple = intermediate_size
UpperCAmelCase__ : Any = hidden_act
UpperCAmelCase__ : Optional[int] = hidden_dropout_prob
UpperCAmelCase__ : str = attention_probs_dropout_prob
UpperCAmelCase__ : Optional[int] = type_sequence_label_size
UpperCAmelCase__ : Any = initializer_range
UpperCAmelCase__ : Any = scope
UpperCAmelCase__ : Optional[Any] = out_indices
UpperCAmelCase__ : int = num_labels
# in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
UpperCAmelCase__ : List[Any] = (image_size // patch_size) ** 2
UpperCAmelCase__ : Optional[int] = num_patches + 1
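        # e.g. with the signature defaults image_size=30 and patch_size=2:
        # (30 // 2) ** 2 = 225 patches, so the expected sequence length is 226.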
def __a ( self : Dict ):
'''simple docstring'''
UpperCAmelCase__ : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase__ : str = None
UpperCAmelCase__ : Optional[int] = None
if self.use_labels:
UpperCAmelCase__ : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase__ : Any = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
UpperCAmelCase__ : Tuple = self.get_config()
return config, pixel_values, labels, pixel_labels
def __a ( self : int ):
'''simple docstring'''
return BeitConfig(
vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=snake_case__ , initializer_range=self.initializer_range , out_indices=self.out_indices , )
def __a ( self : int , snake_case__ : str , snake_case__ : str , snake_case__ : Dict , snake_case__ : List[str] ):
'''simple docstring'''
UpperCAmelCase__ : Dict = BeitModel(config=snake_case__ )
model.to(snake_case__ )
model.eval()
UpperCAmelCase__ : Dict = model(snake_case__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __a ( self : Any , snake_case__ : List[str] , snake_case__ : Union[str, Any] , snake_case__ : Dict , snake_case__ : Any ):
'''simple docstring'''
UpperCAmelCase__ : int = BeitForMaskedImageModeling(config=snake_case__ )
model.to(snake_case__ )
model.eval()
UpperCAmelCase__ : List[Any] = model(snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) )
def __a ( self : Optional[Any] , snake_case__ : Tuple , snake_case__ : Tuple , snake_case__ : str , snake_case__ : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = self.type_sequence_label_size
UpperCAmelCase__ : Union[str, Any] = BeitForImageClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
UpperCAmelCase__ : Union[str, Any] = model(snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
UpperCAmelCase__ : Any = 1
UpperCAmelCase__ : List[Any] = BeitForImageClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
UpperCAmelCase__ : Optional[int] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCAmelCase__ : Optional[Any] = model(snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __a ( self : Union[str, Any] , snake_case__ : int , snake_case__ : str , snake_case__ : Any , snake_case__ : Dict ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = self.num_labels
UpperCAmelCase__ : int = BeitForSemanticSegmentation(snake_case__ )
model.to(snake_case__ )
model.eval()
UpperCAmelCase__ : int = model(snake_case__ )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
UpperCAmelCase__ : Dict = model(snake_case__ , labels=snake_case__ )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
def __a ( self : int ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = self.prepare_config_and_inputs()
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : List[str] = config_and_inputs
UpperCAmelCase__ : Any = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class BeitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": BeitModel,
            "image-classification": BeitForImageClassification,
            "image-segmentation": BeitForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = BeitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BeitConfig, has_text_modality=False, hidden_size=37)
def __a ( self : List[str] ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="BEiT does not use inputs_embeds" )
def __a ( self : List[Any] ):
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(reason="BEiT has some layers using `add_module` which doesn't work well with `nn.DataParallel`" )
def __a ( self : List[str] ):
'''simple docstring'''
pass
def __a ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ : Dict = model_class(snake_case__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
UpperCAmelCase__ : Tuple = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(snake_case__ , nn.Linear ) )
def __a ( self : str ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ : int = model_class(snake_case__ )
UpperCAmelCase__ : int = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase__ : str = [*signature.parameters.keys()]
UpperCAmelCase__ : int = ["pixel_values"]
self.assertListEqual(arg_names[:1] , snake_case__ )
def __a ( self : str ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case__ )
def __a ( self : Dict ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*snake_case__ )
def __a ( self : str ):
'''simple docstring'''
UpperCAmelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*snake_case__ )
def __a ( self : Dict ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*snake_case__ )
def __a ( self : List[Any] ):
'''simple docstring'''
if not self.model_tester.is_training:
return
UpperCAmelCase__ , UpperCAmelCase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase__ : Optional[int] = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if model_class in [*get_values(snake_case__ ), BeitForMaskedImageModeling]:
continue
UpperCAmelCase__ : Optional[Any] = model_class(snake_case__ )
model.to(snake_case__ )
model.train()
UpperCAmelCase__ : Optional[int] = self._prepare_for_class(snake_case__ , snake_case__ , return_labels=snake_case__ )
UpperCAmelCase__ : Tuple = model(**snake_case__ ).loss
loss.backward()
def __a ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
UpperCAmelCase__ : Optional[int] = False
UpperCAmelCase__ : List[str] = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if (
model_class in [*get_values(snake_case__ ), BeitForMaskedImageModeling]
or not model_class.supports_gradient_checkpointing
):
continue
UpperCAmelCase__ : List[Any] = model_class(snake_case__ )
model.gradient_checkpointing_enable()
model.to(snake_case__ )
model.train()
UpperCAmelCase__ : Dict = self._prepare_for_class(snake_case__ , snake_case__ , return_labels=snake_case__ )
UpperCAmelCase__ : Optional[Any] = model(**snake_case__ ).loss
loss.backward()
def __a ( self : str ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase__ : Union[str, Any] = _config_zero_init(snake_case__ )
for model_class in self.all_model_classes:
UpperCAmelCase__ : int = model_class(config=snake_case__ )
for name, param in model.named_parameters():
# we skip lambda parameters as these require special initial values
# determined by config.layer_scale_init_value
if "lambda" in name:
continue
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f'Parameter {name} of model {model_class} seems not properly initialized' , )
@slow
def __a ( self : Any ):
'''simple docstring'''
for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase__ : Optional[Any] = BeitModel.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class BeitModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224") if is_vision_available() else None
@slow
def __a ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = BeitForMaskedImageModeling.from_pretrained("microsoft/beit-base-patch16-224-pt22k" ).to(snake_case__ )
UpperCAmelCase__ : int = self.default_image_processor
UpperCAmelCase__ : List[Any] = prepare_img()
UpperCAmelCase__ : Dict = image_processor(images=snake_case__ , return_tensors="pt" ).pixel_values.to(snake_case__ )
# prepare bool_masked_pos
UpperCAmelCase__ : Union[str, Any] = torch.ones((1, 1_9_6) , dtype=torch.bool ).to(snake_case__ )
# forward pass
with torch.no_grad():
UpperCAmelCase__ : Optional[int] = model(pixel_values=snake_case__ , bool_masked_pos=snake_case__ )
UpperCAmelCase__ : str = outputs.logits
# verify the logits
UpperCAmelCase__ : int = torch.Size((1, 1_9_6, 8_1_9_2) )
self.assertEqual(logits.shape , snake_case__ )
UpperCAmelCase__ : Any = torch.tensor(
[[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]] ).to(snake_case__ )
self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3] , snake_case__ , atol=1e-2 ) )
@slow
def __a ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = BeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224" ).to(snake_case__ )
UpperCAmelCase__ : Tuple = self.default_image_processor
UpperCAmelCase__ : Dict = prepare_img()
UpperCAmelCase__ : Tuple = image_processor(images=snake_case__ , return_tensors="pt" ).to(snake_case__ )
# forward pass
with torch.no_grad():
UpperCAmelCase__ : Union[str, Any] = model(**snake_case__ )
UpperCAmelCase__ : Any = outputs.logits
# verify the logits
UpperCAmelCase__ : Optional[Any] = torch.Size((1, 1_0_0_0) )
self.assertEqual(logits.shape , snake_case__ )
UpperCAmelCase__ : Optional[Any] = torch.tensor([-1.2385, -1.0987, -1.0108] ).to(snake_case__ )
self.assertTrue(torch.allclose(logits[0, :3] , snake_case__ , atol=1e-4 ) )
UpperCAmelCase__ : List[str] = 2_8_1
self.assertEqual(logits.argmax(-1 ).item() , snake_case__ )
@slow
def __a ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : int = BeitForImageClassification.from_pretrained("microsoft/beit-large-patch16-224-pt22k-ft22k" ).to(
snake_case__ )
UpperCAmelCase__ : Tuple = self.default_image_processor
UpperCAmelCase__ : Any = prepare_img()
UpperCAmelCase__ : Union[str, Any] = image_processor(images=snake_case__ , return_tensors="pt" ).to(snake_case__ )
# forward pass
with torch.no_grad():
UpperCAmelCase__ : List[Any] = model(**snake_case__ )
UpperCAmelCase__ : int = outputs.logits
# verify the logits
UpperCAmelCase__ : int = torch.Size((1, 2_1_8_4_1) )
self.assertEqual(logits.shape , snake_case__ )
UpperCAmelCase__ : int = torch.tensor([1.6881, -0.2787, 0.5901] ).to(snake_case__ )
self.assertTrue(torch.allclose(logits[0, :3] , snake_case__ , atol=1e-4 ) )
UpperCAmelCase__ : Any = 2_3_9_6
self.assertEqual(logits.argmax(-1 ).item() , snake_case__ )
@slow
def __a ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ : Dict = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640" )
UpperCAmelCase__ : List[Any] = model.to(snake_case__ )
UpperCAmelCase__ : int = BeitImageProcessor(do_resize=snake_case__ , size=6_4_0 , do_center_crop=snake_case__ )
UpperCAmelCase__ : Any = load_dataset("hf-internal-testing/fixtures_ade20k" , split="test" )
UpperCAmelCase__ : List[Any] = Image.open(ds[0]["file"] )
UpperCAmelCase__ : str = image_processor(images=snake_case__ , return_tensors="pt" ).to(snake_case__ )
# forward pass
with torch.no_grad():
UpperCAmelCase__ : List[str] = model(**snake_case__ )
UpperCAmelCase__ : Dict = outputs.logits
# verify the logits
UpperCAmelCase__ : Any = torch.Size((1, 1_5_0, 1_6_0, 1_6_0) )
self.assertEqual(logits.shape , snake_case__ )
UpperCAmelCase__ : List[str] = version.parse(PIL.__version__ ) < version.parse("9.0.0" )
if is_pillow_less_than_a:
UpperCAmelCase__ : Optional[Any] = torch.tensor(
[
[[-4.9225, -2.3954, -3.0522], [-2.8822, -1.0046, -1.7561], [-2.9549, -1.3228, -2.1347]],
[[-5.8168, -3.4129, -4.0778], [-3.8651, -2.2214, -3.0277], [-3.8356, -2.4643, -3.3535]],
[[-0.0078, 3.9952, 4.0754], [2.9856, 4.6944, 5.0035], [3.2413, 4.7813, 4.9969]],
] , device=snake_case__ , )
else:
UpperCAmelCase__ : int = torch.tensor(
[
[[-4.8960, -2.3688, -3.0355], [-2.8478, -0.9836, -1.7418], [-2.9449, -1.3332, -2.1456]],
[[-5.8081, -3.4124, -4.1006], [-3.8561, -2.2081, -3.0323], [-3.8365, -2.4601, -3.3669]],
[[-0.0309, 3.9868, 4.0540], [2.9640, 4.6877, 4.9976], [3.2081, 4.7690, 4.9942]],
] , device=snake_case__ , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , snake_case__ , atol=1e-4 ) )
@slow
def __a ( self : Any ):
'''simple docstring'''
UpperCAmelCase__ : str = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640" )
UpperCAmelCase__ : Any = model.to(snake_case__ )
UpperCAmelCase__ : Dict = BeitImageProcessor(do_resize=snake_case__ , size=6_4_0 , do_center_crop=snake_case__ )
UpperCAmelCase__ : Tuple = load_dataset("hf-internal-testing/fixtures_ade20k" , split="test" )
UpperCAmelCase__ : Optional[int] = Image.open(ds[0]["file"] )
UpperCAmelCase__ : Optional[int] = image_processor(images=snake_case__ , return_tensors="pt" ).to(snake_case__ )
# forward pass
with torch.no_grad():
UpperCAmelCase__ : Optional[int] = model(**snake_case__ )
UpperCAmelCase__ : int = outputs.logits.detach().cpu()
UpperCAmelCase__ : str = image_processor.post_process_semantic_segmentation(outputs=snake_case__ , target_sizes=[(5_0_0, 3_0_0)] )
UpperCAmelCase__ : List[Any] = torch.Size((5_0_0, 3_0_0) )
self.assertEqual(segmentation[0].shape , snake_case__ )
UpperCAmelCase__ : Any = image_processor.post_process_semantic_segmentation(outputs=snake_case__ )
UpperCAmelCase__ : int = torch.Size((1_6_0, 1_6_0) )
self.assertEqual(segmentation[0].shape , snake_case__ )
| 298
| 1
|
"""simple docstring"""
import os
import re
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
    """vocab_file""": """vocab.txt""",
    """merges_file""": """bpe.codes""",
}
PRETRAINED_VOCAB_FILES_MAP = {
    """vocab_file""": {
        """vinai/phobert-base""": """https://huggingface.co/vinai/phobert-base/resolve/main/vocab.txt""",
        """vinai/phobert-large""": """https://huggingface.co/vinai/phobert-large/resolve/main/vocab.txt""",
    },
    """merges_file""": {
        """vinai/phobert-base""": """https://huggingface.co/vinai/phobert-base/resolve/main/bpe.codes""",
        """vinai/phobert-large""": """https://huggingface.co/vinai/phobert-large/resolve/main/bpe.codes""",
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    """vinai/phobert-base""": 256,
    """vinai/phobert-large""": 256,
}
def get_pairs(word):
    """
    Return the set of adjacent symbol pairs in a word, where the word is
    represented as a tuple of symbols (variable-length strings).
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    pairs = set(pairs)
    return pairs
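# For example, get_pairs(("l", "o", "w")) returns {("l", "o"), ("o", "w")}.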
class PhobertTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    def __init__(self, vocab_file, merges_file, bos_token="<s>", eos_token="</s>", sep_token="</s>",
                 cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", **kwargs):
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self.merges_file = merges_file
        self.encoder = {}
        self.encoder[str(bos_token)] = 0
        self.encoder[str(pad_token)] = 1
        self.encoder[str(eos_token)] = 2
        self.encoder[str(unk_token)] = 3
        self.add_from_file(vocab_file)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            merges = merges_handle.read().split("\n")[:-1]
        merges = [tuple(merge.split()[:-1]) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
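        # Resulting layout: a single sequence is encoded as <s> A </s>, and a pair
        # as <s> A </s></s> B </s> (the RoBERTa-style special-token format).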
    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
        pairs = get_pairs(word)
        if not pairs:
            return token
        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j
                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = "@@ ".join(word)
        word = word[:-4]
        self.cache[token] = word
        return word
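        # Illustration (the actual segmentation depends on the merges learned in
        # bpe.codes): a rare word may come back as e.g. "un@@ believ@@ able",
        # where the trailing "@@ " marks a split inside the original token.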
    def _tokenize(self, text):
        split_tokens = []
        words = re.findall(r"\S+\n?", text)
        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens
    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        out_merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        if os.path.abspath(self.merges_file) != os.path.abspath(out_merge_file):
            copyfile(self.merges_file, out_merge_file)
        return out_vocab_file, out_merge_file
    def add_from_file(self, f):
        """Loads a pre-existing dictionary from a text file and adds its symbols to this instance."""
        if isinstance(f, str):
            try:
                with open(f, "r", encoding="utf-8") as fd:
                    self.add_from_file(fd)
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception(f"Incorrect encoding detected in {f}, please rebuild the dataset")
            return

        lines = f.readlines()
        for lineTmp in lines:
            line = lineTmp.strip()
            idx = line.rfind(" ")
            if idx == -1:
                raise ValueError("Incorrect dictionary format, expected '<token> <cnt>'")
            word = line[:idx]
            self.encoder[word] = len(self.encoder)
| 298
|
"""simple docstring"""
import functools
def min_distance_up_bottom(worda: str, wordb: str) -> int:
    """
    Compute the Levenshtein distance between two words with memoized
    top-down recursion.

    >>> min_distance_up_bottom("intention", "execution")
    5
    >>> min_distance_up_bottom("intention", "")
    9
    >>> min_distance_up_bottom("", "")
    0
    >>> min_distance_up_bottom("zooicoarchaeologist", "zoologist")
    10
    """
    len_worda = len(worda)
    len_wordb = len(wordb)

    @functools.cache
    def min_distance(indexa: int, indexb: int) -> int:
        # if first word index is overflow - delete all from the second word
        if indexa >= len_worda:
            return len_wordb - indexb
        # if second word index is overflow - delete all from the first word
        if indexb >= len_wordb:
            return len_worda - indexa
        diff = int(worda[indexa] != wordb[indexb])  # current letters not identical
        return min(
            1 + min_distance(indexa + 1, indexb),
            1 + min_distance(indexa, indexb + 1),
            diff + min_distance(indexa + 1, indexb + 1),
        )

    return min_distance(0, 0)
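# The memoized recursion touches O(len(worda) * len(wordb)) distinct
# (indexa, indexb) states; functools.cache deduplicates repeated subproblems.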
if __name__ == "__main__":
import doctest
doctest.testmod()
| 298
| 1
|
"""simple docstring"""
import importlib.metadata
import operator
import re
import sys
from typing import Optional
from packaging import version
ops = {
"""<""": operator.lt,
"""<=""": operator.le,
"""==""": operator.eq,
"""!=""": operator.ne,
""">=""": operator.ge,
""">""": operator.gt,
}
def _compare_versions(op, got_ver, want_ver, requirement, pkg, hint):
    if got_ver is None or want_ver is None:
        raise ValueError(
            f"Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider"
            f" reinstalling {pkg}.")
    if not ops[op](version.parse(got_ver), version.parse(want_ver)):
        raise ImportError(
            f"{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}")
def require_version(requirement: str, hint: Optional[str] = None) -> None:
    """Performs a runtime check of a dependency version, using the same syntax as pip."""
    hint = f"\n{hint}" if hint is not None else ""
    # non-versioned check
    if re.match(r"^[\w_\-\d]+$", requirement):
        pkg, op, want_ver = requirement, None, None
    else:
        match = re.findall(r"^([^!=<>\s]+)([\s!=<>]{1,2}.+)", requirement)
        if not match:
            raise ValueError(
                "requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23, but"
                f" got {requirement}")
        pkg, want_full = match[0]
        want_range = want_full.split(",")  # there could be multiple requirements
        wanted = {}
        for w in want_range:
            match = re.findall(r"^([\s!=<>]{1,2})(.+)", w)
            if not match:
                raise ValueError(
                    "requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23,"
                    f" but got {requirement}")
            op, want_ver = match[0]
            wanted[op] = want_ver
            if op not in ops:
                raise ValueError(f"{requirement}: need one of {list(ops.keys())}, but got {op}")
    # special case
    if pkg == "python":
        got_ver = ".".join([str(x) for x in sys.version_info[:3]])
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)
        return
    # check if any version is installed
    try:
        got_ver = importlib.metadata.version(pkg)
    except importlib.metadata.PackageNotFoundError:
        raise importlib.metadata.PackageNotFoundError(
            f"The '{requirement}' distribution was not found and is required by this application. {hint}")
    # check that the right version is installed if version number or a range was provided
    if want_ver is not None:
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)
def require_version_core(requirement):
    """require_version wrapper which emits a core-specific hint on failure."""
    hint = "Try: pip install transformers -U or pip install -e '.[dev]' if you're working with git main"
    return require_version(requirement, hint)
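# Example checks (requirement strings are illustrative):
#   require_version("numpy>=1.18.5,<2.0")      # range check against the installed version
#   require_version_core("tokenizers==0.9.4")  # same check, with the transformers-specific hint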
| 298
|
"""simple docstring"""
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class TestDatasetScripts(TestCase):
    def _no_encoding_on_file_open(self, file_path: str):
        r"""Find all instances where a non-binary file is opened without UTF-8 encoding."""
        with open(file_path, encoding="utf-8") as input_file:
            regexp = re.compile(r"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)")
            input_text = input_file.read()
            match = regexp.search(input_text)
        return match

    def _no_print_statements(self, file_path: str):
        r"""Find all print statements."""
        with open(file_path, encoding="utf-8") as input_file:
            regexp = re.compile(r"#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()", re.DOTALL)
            input_text = input_file.read()
            # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
            matches = regexp.finditer(input_text)
            matches = [match for match in matches if match is not None and match.group(1) is not None]
        return matches[0] if matches else None

    def test_no_encoding_on_file_open(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))
        for dataset in dataset_files:
            if self._no_encoding_on_file_open(str(dataset)):
                raise AssertionError(f"open(...) must use utf-8 encoding in {dataset}")

    def test_no_print_statements(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))
        for dataset in dataset_files:
            if self._no_print_statements(str(dataset)):
                raise AssertionError(f"print statement found in {dataset}. Use datasets.logger/logging instead.")
| 298
| 1
|
"""simple docstring"""
def mf_knapsack(i, wt, val, j):
    """
    This code involves the concept of memory functions. Here we solve only the
    subproblems which are actually needed, unlike the tabulated example below.
    f is a 2D array pre-filled with -1s.
    """
    global f  # a global dp table for knapsack
    if f[i][j] < 0:
        if j < wt[i - 1]:
            val = mf_knapsack(i - 1, wt, val, j)
        else:
            val = max(
                mf_knapsack(i - 1, wt, val, j),
                mf_knapsack(i - 1, wt, val, j - wt[i - 1]) + val[i - 1],
            )
        f[i][j] = val
    return f[i][j]


def knapsack(w, wt, val, n):
    dp = [[0] * (w + 1) for _ in range(n + 1)]
    for i in range(1, n + 1):
        for w_ in range(1, w + 1):
            if wt[i - 1] <= w_:
                dp[i][w_] = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]], dp[i - 1][w_])
            else:
                dp[i][w_] = dp[i - 1][w_]
    return dp[n][w_], dp


def knapsack_with_example_solution(w: int, wt: list, val: list):
    """
    Solves the integer weights knapsack problem and returns one of several
    possible optimal subsets.
    """
    if not (isinstance(wt, (list, tuple)) and isinstance(val, (list, tuple))):
        raise ValueError("Both the weights and values vectors must be either lists or tuples")
    num_items = len(wt)
    if num_items != len(val):
        msg = (
            "The number of weights must be the same as the number of values.\n"
            f"But got {num_items} weights and {len(val)} values"
        )
        raise ValueError(msg)
    for i in range(num_items):
        if not isinstance(wt[i], int):
            msg = (
                "All weights must be integers but got weight of "
                f"type {type(wt[i])} at index {i}"
            )
            raise TypeError(msg)
    optimal_val, dp_table = knapsack(w, wt, val, num_items)
    example_optional_set: set = set()
    _construct_solution(dp_table, wt, num_items, w, example_optional_set)
    return optimal_val, example_optional_set


def _construct_solution(dp: list, wt: list, i: int, j: int, optimal_set: set):
    """
    Recursively reconstructs one of the optimal subsets from a filled DP table
    and the vector of weights.
    """
    # for the current item i at a maximum weight j to be part of an optimal subset,
    # the optimal value at (i, j) must be greater than the optimal value at (i - 1, j)
    if i > 0 and j > 0:
        if dp[i - 1][j] == dp[i][j]:
            _construct_solution(dp, wt, i - 1, j, optimal_set)
        else:
            optimal_set.add(i)
            _construct_solution(dp, wt, i - 1, j - wt[i - 1], optimal_set)


if __name__ == "__main__":
    val = [3, 2, 4, 4]
    wt = [4, 3, 2, 3]
    n = 4
    w = 6
    f = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)]
    optimal_solution, _ = knapsack(w, wt, val, n)
    print(optimal_solution)
    print(mf_knapsack(n, wt, val, w))  # switched the n and w

    # testing the dynamic programming problem with example
    # the optimal subset for the above example are items 3 and 4
    optimal_solution, optimal_subset = knapsack_with_example_solution(w, wt, val)
    assert optimal_solution == 8
    assert optimal_subset == {3, 4}
    print("optimal_value = ", optimal_solution)
    print("An optimal subset corresponding to the optimal value", optimal_subset)
| 298
|
"""simple docstring"""
import numpy as np
import datasets
_lowerCAmelCase : Optional[int] = """
Compute the Mahalanobis Distance
Mahalonobis distance is the distance between a point and a distribution.
And not between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance.
It was introduced by Prof. P. C. Mahalanobis in 1936
and has been used in various statistical applications ever since
[source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/]
"""
_lowerCAmelCase : Tuple = """\
@article{de2000mahalanobis,
title={The mahalanobis distance},
author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L},
journal={Chemometrics and intelligent laboratory systems},
volume={50},
number={1},
pages={1--18},
year={2000},
publisher={Elsevier}
}
"""
_lowerCAmelCase : Optional[int] = """
Args:
X: List of datapoints to be compared with the `reference_distribution`.
reference_distribution: List of datapoints from the reference distribution we want to compare to.
Returns:
mahalanobis: The Mahalonobis distance for each datapoint in `X`.
Examples:
>>> mahalanobis_metric = datasets.load_metric(\"mahalanobis\")
>>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]])
>>> print(results)
{'mahalanobis': array([0.5])}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Mahalanobis(datasets.Metric):
    def _info(self):
        """Returns the metric metadata (features and descriptions)."""
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "X": datasets.Sequence(datasets.Value("float", id="sequence"), id="X"),
                }
            ),
        )
    def _compute(self, X, reference_distribution):
        # convert to numpy arrays
        X = np.array(X)
        reference_distribution = np.array(reference_distribution)

        # Assert that arrays are 2D
        if len(X.shape) != 2:
            raise ValueError("Expected `X` to be a 2D vector")
        if len(reference_distribution.shape) != 2:
            raise ValueError("Expected `reference_distribution` to be a 2D vector")
        if reference_distribution.shape[0] < 2:
            raise ValueError(
                "Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension")

        # Get mahalanobis distance for each prediction
        X_minus_mu = X - np.mean(reference_distribution)
        cov = np.cov(reference_distribution.T)
        try:
            inv_covmat = np.linalg.inv(cov)
        except np.linalg.LinAlgError:
            inv_covmat = np.linalg.pinv(cov)
        left_term = np.dot(X_minus_mu, inv_covmat)
        mahal_dist = np.dot(left_term, X_minus_mu.T).diagonal()

        return {"mahalanobis": mahal_dist}
| 298
| 1
|
"""simple docstring"""
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class TFConvBertModelTester:
def __init__( self : Union[str, Any] , snake_case__ : Any , snake_case__ : Any=1_3 , snake_case__ : Dict=7 , snake_case__ : Dict=True , snake_case__ : Optional[Any]=True , snake_case__ : Optional[int]=True , snake_case__ : List[str]=True , snake_case__ : List[str]=9_9 , snake_case__ : str=3_2 , snake_case__ : List[Any]=2 , snake_case__ : int=4 , snake_case__ : Any=3_7 , snake_case__ : Union[str, Any]="gelu" , snake_case__ : Tuple=0.1 , snake_case__ : Any=0.1 , snake_case__ : Optional[int]=5_1_2 , snake_case__ : List[str]=1_6 , snake_case__ : Union[str, Any]=2 , snake_case__ : List[Any]=0.02 , snake_case__ : Optional[int]=3 , snake_case__ : List[Any]=4 , snake_case__ : int=None , ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = parent
UpperCAmelCase__ : Union[str, Any] = 1_3
UpperCAmelCase__ : str = 7
UpperCAmelCase__ : Optional[int] = True
UpperCAmelCase__ : Optional[Any] = True
UpperCAmelCase__ : Tuple = True
UpperCAmelCase__ : int = True
UpperCAmelCase__ : Dict = 9_9
UpperCAmelCase__ : Any = 3_8_4
UpperCAmelCase__ : str = 2
UpperCAmelCase__ : Union[str, Any] = 4
UpperCAmelCase__ : Any = 3_7
UpperCAmelCase__ : List[Any] = "gelu"
UpperCAmelCase__ : Optional[int] = 0.1
UpperCAmelCase__ : Union[str, Any] = 0.1
UpperCAmelCase__ : List[Any] = 5_1_2
UpperCAmelCase__ : int = 1_6
UpperCAmelCase__ : List[str] = 2
UpperCAmelCase__ : Optional[Any] = 0.02
UpperCAmelCase__ : Dict = 3
UpperCAmelCase__ : str = 4
UpperCAmelCase__ : Optional[Any] = 1_2_8
UpperCAmelCase__ : List[Any] = 2
UpperCAmelCase__ : Any = 9
UpperCAmelCase__ : List[Any] = 1
UpperCAmelCase__ : Any = None
def __a ( self : Dict ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase__ : Optional[Any] = None
if self.use_input_mask:
UpperCAmelCase__ : str = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase__ : Union[str, Any] = None
if self.use_token_type_ids:
UpperCAmelCase__ : str = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCAmelCase__ : Union[str, Any] = None
UpperCAmelCase__ : Optional[Any] = None
UpperCAmelCase__ : Dict = None
if self.use_labels:
UpperCAmelCase__ : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase__ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCAmelCase__ : Tuple = ids_tensor([self.batch_size] , self.num_choices )
UpperCAmelCase__ : int = ConvBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=snake_case__ , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __a ( self : str , snake_case__ : Any , snake_case__ : int , snake_case__ : List[str] , snake_case__ : Dict , snake_case__ : List[Any] , snake_case__ : Optional[Any] , snake_case__ : List[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Dict = TFConvBertModel(config=snake_case__ )
UpperCAmelCase__ : int = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
UpperCAmelCase__ : int = [input_ids, input_mask]
UpperCAmelCase__ : List[Any] = model(snake_case__ )
UpperCAmelCase__ : Any = model(snake_case__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __a ( self : List[Any] , snake_case__ : Optional[Any] , snake_case__ : Tuple , snake_case__ : Union[str, Any] , snake_case__ : Tuple , snake_case__ : Tuple , snake_case__ : List[Any] , snake_case__ : List[str] ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = TFConvBertForMaskedLM(config=snake_case__ )
UpperCAmelCase__ : List[str] = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
UpperCAmelCase__ : Optional[Any] = model(snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __a ( self : Union[str, Any] , snake_case__ : Dict , snake_case__ : List[str] , snake_case__ : Union[str, Any] , snake_case__ : str , snake_case__ : Union[str, Any] , snake_case__ : Union[str, Any] , snake_case__ : Any ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = self.num_labels
UpperCAmelCase__ : Dict = TFConvBertForSequenceClassification(config=snake_case__ )
UpperCAmelCase__ : Dict = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
UpperCAmelCase__ : Tuple = model(snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __a ( self : Any , snake_case__ : Any , snake_case__ : Dict , snake_case__ : List[str] , snake_case__ : Dict , snake_case__ : Union[str, Any] , snake_case__ : Union[str, Any] , snake_case__ : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = self.num_choices
UpperCAmelCase__ : Union[str, Any] = TFConvBertForMultipleChoice(config=snake_case__ )
UpperCAmelCase__ : List[str] = tf.tile(tf.expand_dims(snake_case__ , 1 ) , (1, self.num_choices, 1) )
UpperCAmelCase__ : Optional[int] = tf.tile(tf.expand_dims(snake_case__ , 1 ) , (1, self.num_choices, 1) )
UpperCAmelCase__ : Any = tf.tile(tf.expand_dims(snake_case__ , 1 ) , (1, self.num_choices, 1) )
UpperCAmelCase__ : Union[str, Any] = {
"input_ids": multiple_choice_inputs_ids,
"attention_mask": multiple_choice_input_mask,
"token_type_ids": multiple_choice_token_type_ids,
}
UpperCAmelCase__ : List[str] = model(snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __a ( self : Optional[Any] , snake_case__ : str , snake_case__ : Union[str, Any] , snake_case__ : List[Any] , snake_case__ : Tuple , snake_case__ : str , snake_case__ : Any , snake_case__ : List[str] ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = self.num_labels
UpperCAmelCase__ : Any = TFConvBertForTokenClassification(config=snake_case__ )
UpperCAmelCase__ : Union[str, Any] = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
UpperCAmelCase__ : List[str] = model(snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __a ( self : Any , snake_case__ : Optional[int] , snake_case__ : Any , snake_case__ : List[str] , snake_case__ : str , snake_case__ : List[str] , snake_case__ : List[Any] , snake_case__ : Dict ):
'''simple docstring'''
UpperCAmelCase__ : int = TFConvBertForQuestionAnswering(config=snake_case__ )
UpperCAmelCase__ : Tuple = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
UpperCAmelCase__ : str = model(snake_case__ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __a ( self : Dict ):
'''simple docstring'''
UpperCAmelCase__ : str = self.prepare_config_and_inputs()
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : str = config_and_inputs
UpperCAmelCase__ : Union[str, Any] = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_tf
class lowerCAmelCase__ ( __magic_name__ , __magic_name__ , unittest.TestCase ):
SCREAMING_SNAKE_CASE_ =(
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
SCREAMING_SNAKE_CASE_ =(
{
'''feature-extraction''': TFConvBertModel,
'''fill-mask''': TFConvBertForMaskedLM,
'''question-answering''': TFConvBertForQuestionAnswering,
'''text-classification''': TFConvBertForSequenceClassification,
'''token-classification''': TFConvBertForTokenClassification,
'''zero-shot''': TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
SCREAMING_SNAKE_CASE_ =False
SCREAMING_SNAKE_CASE_ =False
SCREAMING_SNAKE_CASE_ =False
def __a ( self : List[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = TFConvBertModelTester(self )
UpperCAmelCase__ : Union[str, Any] = ConfigTester(self , config_class=snake_case__ , hidden_size=3_7 )
def __a ( self : str ):
'''simple docstring'''
self.config_tester.run_common_tests()
def __a ( self : Any ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case__ )
def __a ( self : str ):
'''simple docstring'''
UpperCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*snake_case__ )
def __a ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*snake_case__ )
def __a ( self : str ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*snake_case__ )
def __a ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*snake_case__ )
def __a ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*snake_case__ )
@slow
def __a ( self : List[Any] ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase__ : Any = True
UpperCAmelCase__ : str = True
if hasattr(snake_case__ , "use_cache" ):
UpperCAmelCase__ : Union[str, Any] = True
UpperCAmelCase__ : Tuple = getattr(self.model_tester , "encoder_seq_length" , self.model_tester.seq_length )
UpperCAmelCase__ : Union[str, Any] = getattr(self.model_tester , "key_length" , snake_case__ )
for model_class in self.all_model_classes:
UpperCAmelCase__ : str = self._prepare_for_class(snake_case__ , snake_case__ )
UpperCAmelCase__ : Any = model_class(snake_case__ )
UpperCAmelCase__ : Dict = len(model(snake_case__ ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(snake_case__ , saved_model=snake_case__ )
UpperCAmelCase__ : Dict = os.path.join(snake_case__ , "saved_model" , "1" )
UpperCAmelCase__ : Union[str, Any] = tf.keras.models.load_model(snake_case__ )
UpperCAmelCase__ : List[Any] = model(snake_case__ )
if self.is_encoder_decoder:
UpperCAmelCase__ : Any = outputs["encoder_hidden_states"]
UpperCAmelCase__ : Dict = outputs["encoder_attentions"]
else:
UpperCAmelCase__ : int = outputs["hidden_states"]
UpperCAmelCase__ : int = outputs["attentions"]
self.assertEqual(len(snake_case__ ) , snake_case__ )
UpperCAmelCase__ : Optional[int] = getattr(
self.model_tester , "expected_num_hidden_layers" , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(snake_case__ ) , snake_case__ )
self.assertListEqual(
list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
self.assertEqual(len(snake_case__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
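# Note on the division by 2: ConvBERT mixes self-attention with span-based dynamic
# convolution, and with a head ratio of 2 (as this tester assumes) only half of
# num_attention_heads surface as attention maps, hence the halved head dimension here.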
@slow
def __a ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = TFConvBertModel.from_pretrained("YituTech/conv-bert-base" )
self.assertIsNotNone(snake_case__ )
def __a ( self : Dict ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase__ : int = True
UpperCAmelCase__ : Any = getattr(self.model_tester , "decoder_seq_length" , self.model_tester.seq_length )
UpperCAmelCase__ : List[str] = getattr(self.model_tester , "encoder_seq_length" , self.model_tester.seq_length )
UpperCAmelCase__ : List[Any] = getattr(self.model_tester , "key_length" , snake_case__ )
UpperCAmelCase__ : List[str] = getattr(self.model_tester , "key_length" , snake_case__ )
def check_decoder_attentions_output(snake_case__ : int ):
UpperCAmelCase__ : List[str] = len(snake_case__ )
self.assertEqual(out_len % 2 , 0 )
UpperCAmelCase__ : Tuple = outputs.decoder_attentions
self.assertEqual(len(snake_case__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )
def check_encoder_attentions_output(snake_case__ : Any ):
UpperCAmelCase__ : Dict = [
t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
]
self.assertEqual(len(snake_case__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
for model_class in self.all_model_classes:
UpperCAmelCase__ : List[str] = True
UpperCAmelCase__ : Union[str, Any] = False
UpperCAmelCase__ : str = model_class(snake_case__ )
UpperCAmelCase__ : Dict = model(self._prepare_for_class(snake_case__ , snake_case__ ) )
UpperCAmelCase__ : Union[str, Any] = len(snake_case__ )
self.assertEqual(config.output_hidden_states , snake_case__ )
check_encoder_attentions_output(snake_case__ )
if self.is_encoder_decoder:
UpperCAmelCase__ : int = model_class(snake_case__ )
UpperCAmelCase__ : Dict = model(self._prepare_for_class(snake_case__ , snake_case__ ) )
self.assertEqual(config.output_hidden_states , snake_case__ )
check_decoder_attentions_output(snake_case__ )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
UpperCAmelCase__ : Optional[Any] = True
UpperCAmelCase__ : Any = model_class(snake_case__ )
UpperCAmelCase__ : Optional[Any] = model(self._prepare_for_class(snake_case__ , snake_case__ ) )
self.assertEqual(config.output_hidden_states , snake_case__ )
check_encoder_attentions_output(snake_case__ )
# Check attention is always last and order is fine
UpperCAmelCase__ : Dict = True
UpperCAmelCase__ : Union[str, Any] = True
UpperCAmelCase__ : str = model_class(snake_case__ )
UpperCAmelCase__ : int = model(self._prepare_for_class(snake_case__ , snake_case__ ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(snake_case__ ) )
self.assertEqual(model.config.output_hidden_states , snake_case__ )
check_encoder_attentions_output(snake_case__ )
@require_tf
class lowerCAmelCase__ ( unittest.TestCase ):
@slow
def __a ( self : Dict ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = TFConvBertModel.from_pretrained("YituTech/conv-bert-base" )
UpperCAmelCase__ : List[str] = tf.constant([[0, 1, 2, 3, 4, 5]] )
UpperCAmelCase__ : Tuple = model(snake_case__ )[0]
UpperCAmelCase__ : int = [1, 6, 7_6_8]
self.assertEqual(output.shape , snake_case__ )
UpperCAmelCase__ : str = tf.constant(
[
[
[-0.0347_5493, -0.468_6034, -0.3063_8832],
[0.2263_7248, -0.2698_8646, -0.742_3424],
[0.1032_4868, -0.4501_3508, -0.5828_0784],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , snake_case__ , atol=1e-4 )
| 298
|
"""simple docstring"""
import gc
import random
import unittest
import torch
from diffusers import (
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class lowerCAmelCase__ ( __magic_name__ , __magic_name__ , unittest.TestCase ):
SCREAMING_SNAKE_CASE_ =IFPipeline
SCREAMING_SNAKE_CASE_ =TEXT_TO_IMAGE_PARAMS - {'''width''', '''height''', '''latents'''}
SCREAMING_SNAKE_CASE_ =TEXT_TO_IMAGE_BATCH_PARAMS
SCREAMING_SNAKE_CASE_ =PipelineTesterMixin.required_optional_params - {'''latents'''}
def __a ( self : Dict ):
'''simple docstring'''
return self._get_dummy_components()
def __a ( self : Any , snake_case__ : Dict , snake_case__ : Optional[Any]=0 ):
'''simple docstring'''
if str(snake_case__ ).startswith("mps" ):
UpperCAmelCase__ : str = torch.manual_seed(snake_case__ )
else:
UpperCAmelCase__ : Optional[int] = torch.Generator(device=snake_case__ ).manual_seed(snake_case__ )
UpperCAmelCase__ : Tuple = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
def __a ( self : Tuple ):
'''simple docstring'''
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != "cuda" , reason="float16 requires CUDA" )
def __a ( self : Tuple ):
'''simple docstring'''
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1e-1 )
def __a ( self : Dict ):
'''simple docstring'''
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )
def __a ( self : int ):
'''simple docstring'''
self._test_save_load_local()
def __a ( self : Any ):
'''simple docstring'''
self._test_inference_batch_single_identical(
expected_max_diff=1e-2 , )
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def __a ( self : Optional[Any] ):
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
@slow
@require_torch_gpu
class lowerCAmelCase__ ( unittest.TestCase ):
def __a ( self : str ):
'''simple docstring'''
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __a ( self : Tuple ):
'''simple docstring'''
# if
UpperCAmelCase__ : Any = IFPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0" , variant="fp16" , torch_dtype=torch.floataa )
UpperCAmelCase__ : Union[str, Any] = IFSuperResolutionPipeline.from_pretrained(
"DeepFloyd/IF-II-L-v1.0" , variant="fp16" , torch_dtype=torch.floataa , text_encoder=snake_case__ , tokenizer=snake_case__ )
# pre compute text embeddings and remove T5 to save memory
pipe_a.text_encoder.to("cuda" )
UpperCAmelCase__ , UpperCAmelCase__ : Any = pipe_a.encode_prompt("anime turtle" , device="cuda" )
del pipe_a.tokenizer
del pipe_a.text_encoder
gc.collect()
UpperCAmelCase__ : Tuple = None
UpperCAmelCase__ : List[Any] = None
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# img2img
UpperCAmelCase__ : List[str] = IFImgaImgPipeline(**pipe_a.components )
UpperCAmelCase__ : List[str] = IFImgaImgSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_imgaimg(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# inpainting
UpperCAmelCase__ : List[str] = IFInpaintingPipeline(**pipe_a.components )
UpperCAmelCase__ : List[str] = IFInpaintingSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_inpainting(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
def __a ( self : List[str] , snake_case__ : Tuple , snake_case__ : List[Any] , snake_case__ : List[str] , snake_case__ : List[Any] ):
'''simple docstring'''
# pipeline 1
_start_torch_memory_measurement()
UpperCAmelCase__ : List[str] = torch.Generator(device="cpu" ).manual_seed(0 )
UpperCAmelCase__ : Dict = pipe_a(
prompt_embeds=snake_case__ , negative_prompt_embeds=snake_case__ , num_inference_steps=2 , generator=snake_case__ , output_type="np" , )
UpperCAmelCase__ : List[Any] = output.images[0]
assert image.shape == (6_4, 6_4, 3)
UpperCAmelCase__ : Optional[Any] = torch.cuda.max_memory_allocated()
assert mem_bytes < 1_3 * 1_0**9
UpperCAmelCase__ : str = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy" )
assert_mean_pixel_difference(snake_case__ , snake_case__ )
# pipeline 2
_start_torch_memory_measurement()
UpperCAmelCase__ : Optional[int] = torch.Generator(device="cpu" ).manual_seed(0 )
UpperCAmelCase__ : Dict = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(0 ) ).to(snake_case__ )
UpperCAmelCase__ : str = pipe_a(
prompt_embeds=snake_case__ , negative_prompt_embeds=snake_case__ , image=snake_case__ , generator=snake_case__ , num_inference_steps=2 , output_type="np" , )
UpperCAmelCase__ : Union[str, Any] = output.images[0]
assert image.shape == (2_5_6, 2_5_6, 3)
UpperCAmelCase__ : List[str] = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 1_0**9
UpperCAmelCase__ : Dict = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy" )
assert_mean_pixel_difference(snake_case__ , snake_case__ )
def __a ( self : Optional[Any] , snake_case__ : Optional[int] , snake_case__ : Any , snake_case__ : Optional[Any] , snake_case__ : List[str] ):
'''simple docstring'''
# pipeline 1
_start_torch_memory_measurement()
UpperCAmelCase__ : List[str] = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(0 ) ).to(snake_case__ )
UpperCAmelCase__ : int = torch.Generator(device="cpu" ).manual_seed(0 )
UpperCAmelCase__ : Tuple = pipe_a(
prompt_embeds=snake_case__ , negative_prompt_embeds=snake_case__ , image=snake_case__ , num_inference_steps=2 , generator=snake_case__ , output_type="np" , )
UpperCAmelCase__ : str = output.images[0]
assert image.shape == (6_4, 6_4, 3)
UpperCAmelCase__ : Optional[Any] = torch.cuda.max_memory_allocated()
assert mem_bytes < 1_0 * 1_0**9
UpperCAmelCase__ : List[str] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy" )
assert_mean_pixel_difference(snake_case__ , snake_case__ )
# pipeline 2
_start_torch_memory_measurement()
UpperCAmelCase__ : int = torch.Generator(device="cpu" ).manual_seed(0 )
UpperCAmelCase__ : Optional[int] = floats_tensor((1, 3, 2_5_6, 2_5_6) , rng=random.Random(0 ) ).to(snake_case__ )
UpperCAmelCase__ : Tuple = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(0 ) ).to(snake_case__ )
UpperCAmelCase__ : Dict = pipe_a(
prompt_embeds=snake_case__ , negative_prompt_embeds=snake_case__ , image=snake_case__ , original_image=snake_case__ , generator=snake_case__ , num_inference_steps=2 , output_type="np" , )
UpperCAmelCase__ : Optional[Any] = output.images[0]
assert image.shape == (2_5_6, 2_5_6, 3)
UpperCAmelCase__ : Dict = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 1_0**9
UpperCAmelCase__ : str = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy" )
assert_mean_pixel_difference(snake_case__ , snake_case__ )
def __a ( self : Union[str, Any] , snake_case__ : List[str] , snake_case__ : int , snake_case__ : int , snake_case__ : Optional[int] ):
'''simple docstring'''
# pipeline 1
_start_torch_memory_measurement()
UpperCAmelCase__ : str = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(0 ) ).to(snake_case__ )
UpperCAmelCase__ : Dict = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(1 ) ).to(snake_case__ )
UpperCAmelCase__ : Optional[int] = torch.Generator(device="cpu" ).manual_seed(0 )
UpperCAmelCase__ : int = pipe_a(
prompt_embeds=snake_case__ , negative_prompt_embeds=snake_case__ , image=snake_case__ , mask_image=snake_case__ , num_inference_steps=2 , generator=snake_case__ , output_type="np" , )
UpperCAmelCase__ : int = output.images[0]
assert image.shape == (6_4, 6_4, 3)
UpperCAmelCase__ : Union[str, Any] = torch.cuda.max_memory_allocated()
assert mem_bytes < 1_0 * 1_0**9
UpperCAmelCase__ : int = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy" )
assert_mean_pixel_difference(snake_case__ , snake_case__ )
# pipeline 2
_start_torch_memory_measurement()
UpperCAmelCase__ : int = torch.Generator(device="cpu" ).manual_seed(0 )
UpperCAmelCase__ : Optional[Any] = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(0 ) ).to(snake_case__ )
UpperCAmelCase__ : Optional[int] = floats_tensor((1, 3, 2_5_6, 2_5_6) , rng=random.Random(0 ) ).to(snake_case__ )
UpperCAmelCase__ : List[Any] = floats_tensor((1, 3, 2_5_6, 2_5_6) , rng=random.Random(1 ) ).to(snake_case__ )
UpperCAmelCase__ : Union[str, Any] = pipe_a(
prompt_embeds=snake_case__ , negative_prompt_embeds=snake_case__ , image=snake_case__ , mask_image=snake_case__ , original_image=snake_case__ , generator=snake_case__ , num_inference_steps=2 , output_type="np" , )
UpperCAmelCase__ : Tuple = output.images[0]
assert image.shape == (2_5_6, 2_5_6, 3)
UpperCAmelCase__ : List[str] = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 1_0**9
UpperCAmelCase__ : List[Any] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy" )
assert_mean_pixel_difference(snake_case__ , snake_case__ )
def SCREAMING_SNAKE_CASE__ ( )-> Any:
'''simple docstring'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
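# Resetting CUDA's peak-memory counters before each stage lets every
# torch.cuda.max_memory_allocated() assertion above measure that stage in isolation.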
| 298
| 1
|
"""simple docstring"""
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
_lowerCAmelCase : Union[str, Any] = abspath(join(dirname(dirname(dirname(__file__))), """src"""))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="""ignore""", category=FutureWarning)
def SCREAMING_SNAKE_CASE__ ( snake_case : Optional[int] )-> Any:
'''simple docstring'''
from transformers.testing_utils import pytest_addoption_shared
pytest_addoption_shared(snake_case )
def SCREAMING_SNAKE_CASE__ ( snake_case : int )-> str:
'''simple docstring'''
from transformers.testing_utils import pytest_terminal_summary_main
UpperCAmelCase__ : List[str] = terminalreporter.config.getoption("--make-reports" )
if make_reports:
pytest_terminal_summary_main(snake_case , id=snake_case )
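# Hedged usage sketch: with this conftest active, the reporting flag registered by
# pytest_addoption_shared can be passed straight to pytest, e.g.
#   python -m pytest --make-reports=tests_torch tests/
# after which pytest_terminal_summary_main writes the per-run report files.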
| 298
|
"""simple docstring"""
import os
import re
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_lowerCAmelCase : Optional[int] = logging.get_logger(__name__)
_lowerCAmelCase : Optional[int] = {
"""vocab_file""": """vocab.txt""",
"""merges_file""": """bpe.codes""",
}
_lowerCAmelCase : List[Any] = {
"""vocab_file""": {
"""vinai/phobert-base""": """https://huggingface.co/vinai/phobert-base/resolve/main/vocab.txt""",
"""vinai/phobert-large""": """https://huggingface.co/vinai/phobert-large/resolve/main/vocab.txt""",
},
"""merges_file""": {
"""vinai/phobert-base""": """https://huggingface.co/vinai/phobert-base/resolve/main/bpe.codes""",
"""vinai/phobert-large""": """https://huggingface.co/vinai/phobert-large/resolve/main/bpe.codes""",
},
}
_lowerCAmelCase : int = {
"""vinai/phobert-base""": 256,
"""vinai/phobert-large""": 256,
}
def SCREAMING_SNAKE_CASE__ ( snake_case : List[Any] )-> str:
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = set()
UpperCAmelCase__ : Optional[int] = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
UpperCAmelCase__ : Dict = char
UpperCAmelCase__ : Tuple = set(snake_case )
return pairs
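# Illustrative example: for the symbol tuple ("l", "o", "w", "</w>") this returns
# {("l", "o"), ("o", "w"), ("w", "</w>")} - the adjacent bigrams that the BPE loop
# below scans for its next merge.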
class lowerCAmelCase__ ( __magic_name__ ):
SCREAMING_SNAKE_CASE_ =VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE_ =PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE_ =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self : List[Any] , snake_case__ : int , snake_case__ : Union[str, Any] , snake_case__ : Tuple="<s>" , snake_case__ : List[Any]="</s>" , snake_case__ : Union[str, Any]="</s>" , snake_case__ : Union[str, Any]="<s>" , snake_case__ : Any="<unk>" , snake_case__ : int="<pad>" , snake_case__ : List[str]="<mask>" , **snake_case__ : Optional[int] , ):
'''simple docstring'''
super().__init__(
bos_token=snake_case__ , eos_token=snake_case__ , unk_token=snake_case__ , sep_token=snake_case__ , cls_token=snake_case__ , pad_token=snake_case__ , mask_token=snake_case__ , **snake_case__ , )
UpperCAmelCase__ : Dict = vocab_file
UpperCAmelCase__ : Tuple = merges_file
UpperCAmelCase__ : List[Any] = {}
UpperCAmelCase__ : Dict = 0
UpperCAmelCase__ : int = 1
UpperCAmelCase__ : Dict = 2
UpperCAmelCase__ : Dict = 3
self.add_from_file(snake_case__ )
UpperCAmelCase__ : Optional[Any] = {v: k for k, v in self.encoder.items()}
with open(snake_case__ , encoding="utf-8" ) as merges_handle:
UpperCAmelCase__ : Tuple = merges_handle.read().split("\n" )[:-1]
UpperCAmelCase__ : Optional[Any] = [tuple(merge.split()[:-1] ) for merge in merges]
UpperCAmelCase__ : List[Any] = dict(zip(snake_case__ , range(len(snake_case__ ) ) ) )
UpperCAmelCase__ : Dict = {}
def __a ( self : int , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None ):
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
UpperCAmelCase__ : str = [self.cls_token_id]
UpperCAmelCase__ : Any = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
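# Resulting layouts: a single sequence A becomes "<s> A </s>", while a pair (A, B)
# becomes "<s> A </s></s> B </s>" - the RoBERTa-style scheme PhoBERT inherits.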
def __a ( self : List[str] , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None , snake_case__ : bool = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=snake_case__ , token_ids_a=snake_case__ , already_has_special_tokens=snake_case__ )
if token_ids_a is None:
return [1] + ([0] * len(snake_case__ )) + [1]
return [1] + ([0] * len(snake_case__ )) + [1, 1] + ([0] * len(snake_case__ )) + [1]
def __a ( self : Union[str, Any] , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = [self.sep_token_id]
UpperCAmelCase__ : str = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def __a ( self : List[str] ):
'''simple docstring'''
return len(self.encoder )
def __a ( self : Any ):
'''simple docstring'''
return dict(self.encoder , **self.added_tokens_encoder )
def __a ( self : Dict , snake_case__ : Tuple ):
'''simple docstring'''
if token in self.cache:
return self.cache[token]
UpperCAmelCase__ : Optional[Any] = tuple(snake_case__ )
UpperCAmelCase__ : Optional[Any] = tuple(list(word[:-1] ) + [word[-1] + "</w>"] )
UpperCAmelCase__ : Any = get_pairs(snake_case__ )
if not pairs:
return token
while True:
UpperCAmelCase__ : List[Any] = min(snake_case__ , key=lambda snake_case__ : self.bpe_ranks.get(snake_case__ , float("inf" ) ) )
if bigram not in self.bpe_ranks:
break
UpperCAmelCase__ , UpperCAmelCase__ : Tuple = bigram
UpperCAmelCase__ : Optional[Any] = []
UpperCAmelCase__ : Tuple = 0
while i < len(snake_case__ ):
try:
UpperCAmelCase__ : Union[str, Any] = word.index(snake_case__ , snake_case__ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
UpperCAmelCase__ : Dict = j
if word[i] == first and i < len(snake_case__ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
UpperCAmelCase__ : Dict = tuple(snake_case__ )
UpperCAmelCase__ : List[Any] = new_word
if len(snake_case__ ) == 1:
break
else:
UpperCAmelCase__ : Dict = get_pairs(snake_case__ )
UpperCAmelCase__ : List[Any] = "@@ ".join(snake_case__ )
UpperCAmelCase__ : Optional[int] = word[:-4]
UpperCAmelCase__ : Union[str, Any] = word
return word
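# Hedged sketch (merge table assumed): if the learned merges split "hello</w>" into
# the units "he" and "llo</w>", this method returns "he@@ llo" - pieces are joined
# with "@@ " and the trailing "</w>" marker is dropped by the word[:-4] slice above.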
def __a ( self : List[Any] , snake_case__ : Tuple ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = []
UpperCAmelCase__ : int = re.findall(R"\S+\n?" , snake_case__ )
for token in words:
split_tokens.extend(list(self.bpe(snake_case__ ).split(" " ) ) )
return split_tokens
def __a ( self : Dict , snake_case__ : List[str] ):
'''simple docstring'''
return self.encoder.get(snake_case__ , self.encoder.get(self.unk_token ) )
def __a ( self : List[Any] , snake_case__ : Any ):
'''simple docstring'''
return self.decoder.get(snake_case__ , self.unk_token )
def __a ( self : str , snake_case__ : Tuple ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = " ".join(snake_case__ ).replace("@@ " , "" ).strip()
return out_string
def __a ( self : Any , snake_case__ : str , snake_case__ : Optional[str] = None ):
'''simple docstring'''
if not os.path.isdir(snake_case__ ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
UpperCAmelCase__ : Tuple = os.path.join(
snake_case__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
UpperCAmelCase__ : str = os.path.join(
snake_case__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case__ ):
copyfile(self.vocab_file , snake_case__ )
if os.path.abspath(self.merges_file ) != os.path.abspath(snake_case__ ):
copyfile(self.merges_file , snake_case__ )
return out_vocab_file, out_merge_file
def __a ( self : List[Any] , snake_case__ : Union[str, Any] ):
'''simple docstring'''
if isinstance(snake_case__ , snake_case__ ):
try:
with open(snake_case__ , "r" , encoding="utf-8" ) as fd:
self.add_from_file(fd )  # pass the open handle, not the path, so the recursion terminates
except FileNotFoundError as fnfe:
raise fnfe
except UnicodeError:
raise Exception(f'Incorrect encoding detected in {f}, please rebuild the dataset' )
return
UpperCAmelCase__ : Dict = f.readlines()
for lineTmp in lines:
UpperCAmelCase__ : Optional[int] = lineTmp.strip()
UpperCAmelCase__ : Tuple = line.rfind(" " )
if idx == -1:
raise ValueError("Incorrect dictionary format, expected '<token> <cnt>'" )
UpperCAmelCase__ : Any = line[:idx]
UpperCAmelCase__ : str = len(self.encoder )
| 298
| 1
|
"""simple docstring"""
from maths.prime_check import is_prime
def SCREAMING_SNAKE_CASE__ ( snake_case : int )-> int:
'''simple docstring'''
if not isinstance(snake_case , snake_case ):
UpperCAmelCase__ : Optional[Any] = f'Input value of [number={number}] must be an integer'
raise TypeError(snake_case )
if is_prime(snake_case ) and is_prime(number + 2 ):
return number + 2
else:
return -1
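# Worked examples: 3 and 3 + 2 = 5 are both prime, so the function returns 5 for 3;
# 4 fails the primality check, so the result is -1.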
if __name__ == "__main__":
import doctest
doctest.testmod()
| 298
|
"""simple docstring"""
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class lowerCAmelCase__ :
SCREAMING_SNAKE_CASE_ =42
# setable values
SCREAMING_SNAKE_CASE_ =42
SCREAMING_SNAKE_CASE_ =42
SCREAMING_SNAKE_CASE_ =None
@classmethod
def __a ( cls : Optional[int] , snake_case__ : CommonSchedulerState , snake_case__ : jnp.ndarray , snake_case__ : jnp.ndarray ):
'''simple docstring'''
return cls(common=snake_case__ , init_noise_sigma=snake_case__ , timesteps=snake_case__ )
@dataclass
class lowerCAmelCase__ ( __magic_name__ ):
SCREAMING_SNAKE_CASE_ =42
class lowerCAmelCase__ ( __magic_name__ , __magic_name__ ):
SCREAMING_SNAKE_CASE_ =[e.name for e in FlaxKarrasDiffusionSchedulers]
SCREAMING_SNAKE_CASE_ =42
@property
def __a ( self : Union[str, Any] ):
'''simple docstring'''
return True
@register_to_config
def __init__( self : Tuple , snake_case__ : int = 1_0_0_0 , snake_case__ : float = 0.0001 , snake_case__ : float = 0.02 , snake_case__ : str = "linear" , snake_case__ : Optional[jnp.ndarray] = None , snake_case__ : str = "fixed_small" , snake_case__ : bool = True , snake_case__ : str = "epsilon" , snake_case__ : jnp.dtype = jnp.floataa , ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = dtype
def __a ( self : Any , snake_case__ : Optional[CommonSchedulerState] = None ):
'''simple docstring'''
if common is None:
UpperCAmelCase__ : Any = CommonSchedulerState.create(self )
# standard deviation of the initial noise distribution
UpperCAmelCase__ : Tuple = jnp.array(1.0 , dtype=self.dtype )
UpperCAmelCase__ : Optional[Any] = jnp.arange(0 , self.config.num_train_timesteps ).round()[::-1]
return DDPMSchedulerState.create(
common=snake_case__ , init_noise_sigma=snake_case__ , timesteps=snake_case__ , )
def __a ( self : int , snake_case__ : DDPMSchedulerState , snake_case__ : jnp.ndarray , snake_case__ : Optional[int] = None ):
'''simple docstring'''
return sample
def __a ( self : Dict , snake_case__ : DDPMSchedulerState , snake_case__ : int , snake_case__ : Tuple = () ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = self.config.num_train_timesteps // num_inference_steps
# creates integer timesteps by multiplying by ratio
# rounding to avoid issues when num_inference_step is power of 3
UpperCAmelCase__ : Tuple = (jnp.arange(0 , snake_case__ ) * step_ratio).round()[::-1]
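# e.g. num_train_timesteps=1000 with num_inference_steps=10 gives step_ratio=100 and
# timesteps = [900, 800, ..., 100, 0]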
return state.replace(
num_inference_steps=snake_case__ , timesteps=snake_case__ , )
def __a ( self : List[str] , snake_case__ : DDPMSchedulerState , snake_case__ : int , snake_case__ : Any=None , snake_case__ : Union[str, Any]=None ):
'''simple docstring'''
UpperCAmelCase__ : int = state.common.alphas_cumprod[t]
UpperCAmelCase__ : Optional[int] = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
# For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
# and sample from it to get previous sample
# x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
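# In symbols: beta_tilde_t = (1 - alpha_bar_{t-1}) / (1 - alpha_bar_t) * beta_t,
# which is exactly what the next line computes from the cumulative alpha products.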
UpperCAmelCase__ : int = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]
if variance_type is None:
UpperCAmelCase__ : Union[str, Any] = self.config.variance_type
# hacks - were probably added for training stability
if variance_type == "fixed_small":
UpperCAmelCase__ : int = jnp.clip(snake_case__ , a_min=1e-20 )
# for rl-diffuser https://arxiv.org/abs/2205.09991
elif variance_type == "fixed_small_log":
UpperCAmelCase__ : Union[str, Any] = jnp.log(jnp.clip(snake_case__ , a_min=1e-20 ) )
elif variance_type == "fixed_large":
UpperCAmelCase__ : List[Any] = state.common.betas[t]
elif variance_type == "fixed_large_log":
# Glide max_log
UpperCAmelCase__ : Optional[int] = jnp.log(state.common.betas[t] )
elif variance_type == "learned":
return predicted_variance
elif variance_type == "learned_range":
UpperCAmelCase__ : List[str] = variance
UpperCAmelCase__ : Optional[Any] = state.common.betas[t]
UpperCAmelCase__ : Any = (predicted_variance + 1) / 2
UpperCAmelCase__ : Dict = frac * max_log + (1 - frac) * min_log
return variance
def __a ( self : Dict , snake_case__ : DDPMSchedulerState , snake_case__ : jnp.ndarray , snake_case__ : int , snake_case__ : jnp.ndarray , snake_case__ : Optional[jax.random.KeyArray] = None , snake_case__ : bool = True , ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = timestep
if key is None:
UpperCAmelCase__ : Optional[int] = jax.random.PRNGKey(0 )
if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = jnp.split(snake_case__ , sample.shape[1] , axis=1 )
else:
UpperCAmelCase__ : int = None
# 1. compute alphas, betas
UpperCAmelCase__ : Union[str, Any] = state.common.alphas_cumprod[t]
UpperCAmelCase__ : Optional[int] = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
UpperCAmelCase__ : List[str] = 1 - alpha_prod_t
UpperCAmelCase__ : List[str] = 1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if self.config.prediction_type == "epsilon":
UpperCAmelCase__ : Optional[int] = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif self.config.prediction_type == "sample":
UpperCAmelCase__ : List[Any] = model_output
elif self.config.prediction_type == "v_prediction":
UpperCAmelCase__ : int = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
else:
raise ValueError(
f'prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` '
" for the FlaxDDPMScheduler." )
# 3. Clip "predicted x_0"
if self.config.clip_sample:
UpperCAmelCase__ : Optional[Any] = jnp.clip(snake_case__ , -1 , 1 )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
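# In closed form the two coefficients are sqrt(alpha_bar_{t-1}) * beta_t / (1 - alpha_bar_t)
# for x_0 and sqrt(alpha_t) * (1 - alpha_bar_{t-1}) / (1 - alpha_bar_t) for x_t.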
UpperCAmelCase__ : Union[str, Any] = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
UpperCAmelCase__ : Tuple = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
UpperCAmelCase__ : Union[str, Any] = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
def random_variance():
UpperCAmelCase__ : List[str] = jax.random.split(snake_case__ , num=1 )
UpperCAmelCase__ : List[str] = jax.random.normal(snake_case__ , shape=model_output.shape , dtype=self.dtype )
return (self._get_variance(snake_case__ , snake_case__ , predicted_variance=snake_case__ ) ** 0.5) * noise
UpperCAmelCase__ : Optional[int] = jnp.where(t > 0 , random_variance() , jnp.zeros(model_output.shape , dtype=self.dtype ) )
UpperCAmelCase__ : Optional[Any] = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample, state)
return FlaxDDPMSchedulerOutput(prev_sample=snake_case__ , state=snake_case__ )
def __a ( self : List[Any] , snake_case__ : DDPMSchedulerState , snake_case__ : jnp.ndarray , snake_case__ : jnp.ndarray , snake_case__ : jnp.ndarray , ):
'''simple docstring'''
return add_noise_common(state.common , snake_case__ , snake_case__ , snake_case__ )
def __a ( self : Optional[int] , snake_case__ : DDPMSchedulerState , snake_case__ : jnp.ndarray , snake_case__ : jnp.ndarray , snake_case__ : jnp.ndarray , ):
'''simple docstring'''
return get_velocity_common(state.common , snake_case__ , snake_case__ , snake_case__ )
def __len__( self : Union[str, Any] ):
'''simple docstring'''
return self.config.num_train_timesteps
| 298
| 1
|
"""simple docstring"""
def SCREAMING_SNAKE_CASE__ ( snake_case : str , snake_case : int )-> str:
'''simple docstring'''
UpperCAmelCase__ : list[list[str]] = [[] for _ in range(snake_case )]
UpperCAmelCase__ : Any = key - 1
if key <= 0:
raise ValueError("Height of grid can't be 0 or negative" )
if key == 1 or len(snake_case ) <= key:
return input_string
for position, character in enumerate(snake_case ):
UpperCAmelCase__ : Optional[int] = position % (lowest * 2) # puts it in bounds
UpperCAmelCase__ : Tuple = min(snake_case , lowest * 2 - num ) # creates zigzag pattern
temp_grid[num].append(snake_case )
UpperCAmelCase__ : Dict = ["".join(snake_case ) for row in temp_grid]
UpperCAmelCase__ : List[str] = "".join(snake_case )
return output_string
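# Worked example: with key 4, "Hello World" is laid out on four rails in a zigzag and
# read off row by row, producing the ciphertext "HWe olordll".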
def SCREAMING_SNAKE_CASE__ ( snake_case : str , snake_case : int )-> str:
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = []
UpperCAmelCase__ : List[Any] = key - 1
if key <= 0:
raise ValueError("Height of grid can't be 0 or negative" )
if key == 1:
return input_string
UpperCAmelCase__ : list[list[str]] = [[] for _ in range(snake_case )] # generates template
for position in range(len(snake_case ) ):
UpperCAmelCase__ : Optional[int] = position % (lowest * 2) # puts it in bounds
UpperCAmelCase__ : int = min(snake_case , lowest * 2 - num ) # creates zigzag pattern
temp_grid[num].append("*" )
UpperCAmelCase__ : Dict = 0
for row in temp_grid: # fills in the characters
UpperCAmelCase__ : Optional[Any] = input_string[counter : counter + len(snake_case )]
grid.append(list(snake_case ) )
counter += len(snake_case )
UpperCAmelCase__ : str = "" # reads as zigzag
for position in range(len(snake_case ) ):
UpperCAmelCase__ : Any = position % (lowest * 2) # puts it in bounds
UpperCAmelCase__ : List[Any] = min(snake_case , lowest * 2 - num ) # creates zigzag pattern
output_string += grid[num][0]
grid[num].pop(0 )
return output_string
def SCREAMING_SNAKE_CASE__ ( snake_case : str )-> dict[int, str]:
'''simple docstring'''
UpperCAmelCase__ : Dict = {}
for key_guess in range(1 , len(snake_case ) ): # tries every key
UpperCAmelCase__ : Tuple = decrypt(snake_case , snake_case )
return results
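# Round trip: decrypt("HWe olordll", 4) recovers "Hello World", and the brute-force
# helper above maps every key in [1, len(ciphertext)) to its candidate plaintext so
# the correct key can be picked out by inspection.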
if __name__ == "__main__":
import doctest
doctest.testmod()
| 298
|
"""simple docstring"""
import inspect
import unittest
from transformers import DecisionTransformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import DecisionTransformerModel
from transformers.models.decision_transformer.modeling_decision_transformer import (
DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
class lowerCAmelCase__ :
def __init__( self : str , snake_case__ : Optional[Any] , snake_case__ : List[Any]=1_3 , snake_case__ : str=7 , snake_case__ : Optional[int]=6 , snake_case__ : Union[str, Any]=1_7 , snake_case__ : Optional[Any]=2_3 , snake_case__ : int=1_1 , snake_case__ : Dict=True , ):
'''simple docstring'''
UpperCAmelCase__ : str = parent
UpperCAmelCase__ : Tuple = batch_size
UpperCAmelCase__ : Dict = seq_length
UpperCAmelCase__ : Union[str, Any] = act_dim
UpperCAmelCase__ : Dict = state_dim
UpperCAmelCase__ : Optional[Any] = hidden_size
UpperCAmelCase__ : List[str] = max_length
UpperCAmelCase__ : int = is_training
def __a ( self : int ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = floats_tensor((self.batch_size, self.seq_length, self.state_dim) )
UpperCAmelCase__ : List[Any] = floats_tensor((self.batch_size, self.seq_length, self.act_dim) )
UpperCAmelCase__ : Union[str, Any] = floats_tensor((self.batch_size, self.seq_length, 1) )
UpperCAmelCase__ : Optional[int] = floats_tensor((self.batch_size, self.seq_length, 1) )
UpperCAmelCase__ : int = ids_tensor((self.batch_size, self.seq_length) , vocab_size=1_0_0_0 )
UpperCAmelCase__ : Optional[int] = random_attention_mask((self.batch_size, self.seq_length) )
UpperCAmelCase__ : Optional[int] = self.get_config()
return (
config,
states,
actions,
rewards,
returns_to_go,
timesteps,
attention_mask,
)
def __a ( self : int ):
'''simple docstring'''
return DecisionTransformerConfig(
batch_size=self.batch_size , seq_length=self.seq_length , act_dim=self.act_dim , state_dim=self.state_dim , hidden_size=self.hidden_size , max_length=self.max_length , )
def __a ( self : Optional[Any] , snake_case__ : Any , snake_case__ : List[str] , snake_case__ : Tuple , snake_case__ : Union[str, Any] , snake_case__ : List[Any] , snake_case__ : Any , snake_case__ : Optional[int] , ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = DecisionTransformerModel(config=snake_case__ )
model.to(snake_case__ )
model.eval()
UpperCAmelCase__ : Dict = model(snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ )
self.parent.assertEqual(result.state_preds.shape , states.shape )
self.parent.assertEqual(result.action_preds.shape , actions.shape )
self.parent.assertEqual(result.return_preds.shape , returns_to_go.shape )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.seq_length * 3, self.hidden_size) ) # seq length * 3 as there are 3 modalities: states, returns and actions
def __a ( self : int ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = self.prepare_config_and_inputs()
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = config_and_inputs
UpperCAmelCase__ : Optional[int] = {
"states": states,
"actions": actions,
"rewards": rewards,
"returns_to_go": returns_to_go,
"timesteps": timesteps,
"attention_mask": attention_mask,
}
return config, inputs_dict
@require_torch
class lowerCAmelCase__ ( __magic_name__ , __magic_name__ , __magic_name__ , unittest.TestCase ):
SCREAMING_SNAKE_CASE_ =(DecisionTransformerModel,) if is_torch_available() else ()
SCREAMING_SNAKE_CASE_ =()
SCREAMING_SNAKE_CASE_ ={'''feature-extraction''': DecisionTransformerModel} if is_torch_available() else {}
# Ignoring a failing test from GenerationTesterMixin, as the model does not use input_ids
SCREAMING_SNAKE_CASE_ =False
# Ignoring failing tests from ModelTesterMixin, as the model does not implement these features
SCREAMING_SNAKE_CASE_ =False
SCREAMING_SNAKE_CASE_ =False
SCREAMING_SNAKE_CASE_ =False
SCREAMING_SNAKE_CASE_ =False
SCREAMING_SNAKE_CASE_ =False
SCREAMING_SNAKE_CASE_ =False
SCREAMING_SNAKE_CASE_ =False
SCREAMING_SNAKE_CASE_ =False
SCREAMING_SNAKE_CASE_ =False
def __a ( self : Any ):
'''simple docstring'''
UpperCAmelCase__ : Any = DecisionTransformerModelTester(self )
UpperCAmelCase__ : Union[str, Any] = ConfigTester(self , config_class=snake_case__ , hidden_size=3_7 )
def __a ( self : List[Any] ):
'''simple docstring'''
self.config_tester.run_common_tests()
def __a ( self : int ):
'''simple docstring'''
UpperCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case__ )
@slow
def __a ( self : List[str] ):
'''simple docstring'''
for model_name in DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase__ : Tuple = DecisionTransformerModel.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
def __a ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ : Dict = model_class(snake_case__ )
UpperCAmelCase__ : Union[str, Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase__ : Tuple = [*signature.parameters.keys()]
UpperCAmelCase__ : str = [
"states",
"actions",
"rewards",
"returns_to_go",
"timesteps",
"attention_mask",
]
self.assertListEqual(arg_names[: len(snake_case__ )] , snake_case__ )
@require_torch
class lowerCAmelCase__ ( unittest.TestCase ):
@slow
def __a ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = 2 # number of steps of autoregressive prediction we will perform
UpperCAmelCase__ : Tuple = 1_0 # defined by the RL environment, may be normalized
UpperCAmelCase__ : Optional[Any] = DecisionTransformerModel.from_pretrained("edbeeching/decision-transformer-gym-hopper-expert" )
UpperCAmelCase__ : Any = model.to(snake_case__ )
UpperCAmelCase__ : Optional[int] = model.config
torch.manual_seed(0 )
UpperCAmelCase__ : Optional[int] = torch.randn(1 , 1 , config.state_dim ).to(device=snake_case__ , dtype=torch.floataa ) # env.reset()
UpperCAmelCase__ : Optional[Any] = torch.tensor(
[[0.24_2793, -0.2869_3074, 0.874_2613], [0.6781_5274, -0.0810_1085, -0.1295_2147]] , device=snake_case__ )
UpperCAmelCase__ : List[str] = torch.tensor(snake_case__ , device=snake_case__ , dtype=torch.floataa ).reshape(1 , 1 , 1 )
UpperCAmelCase__ : Union[str, Any] = state
UpperCAmelCase__ : Dict = torch.zeros(1 , 0 , config.act_dim , device=snake_case__ , dtype=torch.floataa )
UpperCAmelCase__ : Any = torch.zeros(1 , 0 , device=snake_case__ , dtype=torch.floataa )
UpperCAmelCase__ : Optional[int] = torch.tensor(0 , device=snake_case__ , dtype=torch.long ).reshape(1 , 1 )
for step in range(snake_case__ ):
UpperCAmelCase__ : List[Any] = torch.cat([actions, torch.zeros(1 , 1 , config.act_dim , device=snake_case__ )] , dim=1 )
UpperCAmelCase__ : Optional[int] = torch.cat([rewards, torch.zeros(1 , 1 , device=snake_case__ )] , dim=1 )
UpperCAmelCase__ : Dict = torch.ones(1 , states.shape[1] ).to(dtype=torch.long , device=states.device )
with torch.no_grad():
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : int = model(
states=snake_case__ , actions=snake_case__ , rewards=snake_case__ , returns_to_go=snake_case__ , timesteps=snake_case__ , attention_mask=snake_case__ , return_dict=snake_case__ , )
self.assertEqual(action_pred.shape , actions.shape )
self.assertTrue(torch.allclose(action_pred[0, -1] , expected_outputs[step] , atol=1e-4 ) )
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : str = ( # env.step(action)
torch.randn(1 , 1 , config.state_dim ).to(device=snake_case__ , dtype=torch.floataa ),
1.0,
False,
{},
)
UpperCAmelCase__ : Union[str, Any] = action_pred[0, -1]
UpperCAmelCase__ : int = torch.cat([states, state] , dim=1 )
UpperCAmelCase__ : Dict = returns_to_go[0, -1] - reward
UpperCAmelCase__ : Optional[Any] = torch.cat([returns_to_go, pred_return.reshape(1 , 1 , 1 )] , dim=1 )
UpperCAmelCase__ : Tuple = torch.cat(
[timesteps, torch.ones((1, 1) , device=snake_case__ , dtype=torch.long ) * (step + 1)] , dim=1 )
| 298
| 1
|
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCAmelCase : str = logging.get_logger(__name__)
def SCREAMING_SNAKE_CASE__ ( snake_case : str )-> YolosConfig:
'''simple docstring'''
UpperCAmelCase__ : List[Any] = YolosConfig()
# size of the architecture
if "yolos_ti" in yolos_name:
UpperCAmelCase__ : Any = 192
UpperCAmelCase__ : str = 768
UpperCAmelCase__ : List[str] = 12
UpperCAmelCase__ : int = 3
UpperCAmelCase__ : List[Any] = [800, 1333]
UpperCAmelCase__ : List[str] = False
elif yolos_name == "yolos_s_dWr":
UpperCAmelCase__ : int = 330
UpperCAmelCase__ : List[Any] = 14
UpperCAmelCase__ : Optional[int] = 6
UpperCAmelCase__ : Optional[int] = 1320
elif "yolos_s" in yolos_name:
UpperCAmelCase__ : Any = 384
UpperCAmelCase__ : List[Any] = 1536
UpperCAmelCase__ : List[str] = 12
UpperCAmelCase__ : Tuple = 6
elif "yolos_b" in yolos_name:
UpperCAmelCase__ : Tuple = [800, 1344]
UpperCAmelCase__ : Optional[Any] = 91
UpperCAmelCase__ : str = "huggingface/label-files"
UpperCAmelCase__ : Optional[int] = "coco-detection-id2label.json"
UpperCAmelCase__ : Optional[int] = json.load(open(hf_hub_download(snake_case , snake_case , repo_type="dataset" ) , "r" ) )
UpperCAmelCase__ : List[str] = {int(snake_case ): v for k, v in idalabel.items()}
UpperCAmelCase__ : Union[str, Any] = idalabel
UpperCAmelCase__ : Union[str, Any] = {v: k for k, v in idalabel.items()}
return config
def SCREAMING_SNAKE_CASE__ ( snake_case : dict , snake_case : YolosConfig , snake_case : bool = False )-> Optional[int]:
'''simple docstring'''
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
UpperCAmelCase__ : List[str] = state_dict.pop(f'blocks.{i}.attn.qkv.weight' )
UpperCAmelCase__ : List[Any] = state_dict.pop(f'blocks.{i}.attn.qkv.bias' )
# next, add query, keys and values (in that order) to the state dict
UpperCAmelCase__ : Union[str, Any] = in_proj_weight[: config.hidden_size, :]
UpperCAmelCase__ : Optional[int] = in_proj_bias[: config.hidden_size]
UpperCAmelCase__ : Dict = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
UpperCAmelCase__ : Union[str, Any] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
UpperCAmelCase__ : Optional[int] = in_proj_weight[-config.hidden_size :, :]
UpperCAmelCase__ : str = in_proj_bias[-config.hidden_size :]
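# Slicing sketch: the fused qkv weight has 3 * hidden_size rows - rows [0, H) feed the
# query projection, [H, 2H) the key projection, and the final H rows the value
# projection; the bias vector is sliced the same way.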
def SCREAMING_SNAKE_CASE__ ( snake_case : str )-> str:
'''simple docstring'''
if "backbone" in name:
UpperCAmelCase__ : str = name.replace("backbone" , "vit" )
if "cls_token" in name:
UpperCAmelCase__ : Dict = name.replace("cls_token" , "embeddings.cls_token" )
if "det_token" in name:
UpperCAmelCase__ : Optional[int] = name.replace("det_token" , "embeddings.detection_tokens" )
if "mid_pos_embed" in name:
UpperCAmelCase__ : Dict = name.replace("mid_pos_embed" , "encoder.mid_position_embeddings" )
if "pos_embed" in name:
UpperCAmelCase__ : Optional[Any] = name.replace("pos_embed" , "embeddings.position_embeddings" )
if "patch_embed.proj" in name:
UpperCAmelCase__ : int = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" )
if "blocks" in name:
UpperCAmelCase__ : Tuple = name.replace("blocks" , "encoder.layer" )
if "attn.proj" in name:
UpperCAmelCase__ : Tuple = name.replace("attn.proj" , "attention.output.dense" )
if "attn" in name:
UpperCAmelCase__ : Tuple = name.replace("attn" , "attention.self" )
if "norm1" in name:
UpperCAmelCase__ : Any = name.replace("norm1" , "layernorm_before" )
if "norm2" in name:
UpperCAmelCase__ : List[Any] = name.replace("norm2" , "layernorm_after" )
if "mlp.fc1" in name:
UpperCAmelCase__ : List[Any] = name.replace("mlp.fc1" , "intermediate.dense" )
if "mlp.fc2" in name:
UpperCAmelCase__ : Dict = name.replace("mlp.fc2" , "output.dense" )
if "class_embed" in name:
UpperCAmelCase__ : Dict = name.replace("class_embed" , "class_labels_classifier" )
if "bbox_embed" in name:
UpperCAmelCase__ : str = name.replace("bbox_embed" , "bbox_predictor" )
if "vit.norm" in name:
UpperCAmelCase__ : Any = name.replace("vit.norm" , "vit.layernorm" )
return name
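# Example mapping: "backbone.blocks.0.attn.proj.weight" passes through the rules above
# and comes out as "vit.encoder.layer.0.attention.output.dense.weight".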
def SCREAMING_SNAKE_CASE__ ( snake_case : dict , snake_case : YolosForObjectDetection )-> dict:
'''simple docstring'''
for key in orig_state_dict.copy().keys():
UpperCAmelCase__ : List[str] = orig_state_dict.pop(snake_case )
if "qkv" in key:
UpperCAmelCase__ : Union[str, Any] = key.split("." )
UpperCAmelCase__ : int = int(key_split[2] )
UpperCAmelCase__ : int = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
if "weight" in key:
UpperCAmelCase__ : Optional[Any] = val[:dim, :]
UpperCAmelCase__ : Union[str, Any] = val[
dim : dim * 2, :
]
UpperCAmelCase__ : int = val[-dim:, :]
else:
UpperCAmelCase__ : int = val[:dim]
UpperCAmelCase__ : int = val[dim : dim * 2]
UpperCAmelCase__ : Union[str, Any] = val[-dim:]
else:
UpperCAmelCase__ : List[str] = val
return orig_state_dict
def SCREAMING_SNAKE_CASE__ ( )-> torch.Tensor:
'''simple docstring'''
UpperCAmelCase__ : Dict = "http://images.cocodataset.org/val2017/000000039769.jpg"
UpperCAmelCase__ : List[str] = Image.open(requests.get(snake_case , stream=snake_case ).raw )
return im
@torch.no_grad()
def convert_yolos_checkpoint( yolos_name : str , checkpoint_path : str , pytorch_dump_folder_path : str , push_to_hub : bool = False )-> None:
    """Copy the original YOLOS weights into the Transformers design, verify the outputs on a test image and optionally push the result to the hub."""
    config = get_yolos_config(yolos_name )
    # load original state_dict
    state_dict = torch.load(checkpoint_path , map_location="cpu" )["model"]
    # load 🤗 model
    model = YolosForObjectDetection(config )
    model.eval()
    new_state_dict = convert_state_dict(state_dict , model )
    model.load_state_dict(new_state_dict )
    # Check outputs on an image, prepared by YolosImageProcessor
    size = 800 if yolos_name != "yolos_ti" else 512
    image_processor = YolosImageProcessor(format="coco_detection" , size=size )
    encoding = image_processor(images=prepare_img() , return_tensors="pt" )
    outputs = model(**encoding )
    logits, pred_boxes = outputs.logits, outputs.pred_boxes
    expected_slice_logits, expected_slice_boxes = None, None
    if yolos_name == "yolos_ti":
        expected_slice_logits = torch.tensor(
            [[-39.5022, -11.9820, -17.6888], [-29.9574, -9.9769, -17.7691], [-42.3281, -20.7200, -30.6294]] )
        expected_slice_boxes = torch.tensor(
            [[0.4021, 0.0836, 0.7979], [0.0184, 0.2609, 0.0364], [0.1781, 0.2004, 0.2095]] )
    elif yolos_name == "yolos_s_200_pre":
        expected_slice_logits = torch.tensor(
            [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]] )
        expected_slice_boxes = torch.tensor(
            [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]] )
    elif yolos_name == "yolos_s_300_pre":
        expected_slice_logits = torch.tensor(
            [[-36.2220, -14.4385, -23.5457], [-35.6970, -14.7583, -21.3935], [-31.5939, -13.6042, -16.8049]] )
        expected_slice_boxes = torch.tensor(
            [[0.7614, 0.2316, 0.4728], [0.7168, 0.4495, 0.3855], [0.4996, 0.1466, 0.9996]] )
    elif yolos_name == "yolos_s_dWr":
        expected_slice_logits = torch.tensor(
            [[-42.8668, -24.1049, -41.1690], [-34.7456, -14.1274, -24.9194], [-33.7898, -12.1946, -25.6495]] )
        expected_slice_boxes = torch.tensor(
            [[0.5587, 0.2773, 0.0605], [0.5004, 0.3014, 0.9994], [0.4999, 0.1548, 0.9994]] )
    elif yolos_name == "yolos_base":
        expected_slice_logits = torch.tensor(
            [[-40.6064, -24.3084, -32.6447], [-55.1990, -30.7719, -35.5877], [-51.4311, -33.3507, -35.6462]] )
        expected_slice_boxes = torch.tensor(
            [[0.5555, 0.2794, 0.0655], [0.9049, 0.2664, 0.1894], [0.9183, 0.1984, 0.1635]] )
    else:
        raise ValueError(f'Unknown yolos_name: {yolos_name}' )
    assert torch.allclose(logits[0, :3, :3] , expected_slice_logits , atol=1e-4 )
    assert torch.allclose(pred_boxes[0, :3, :3] , expected_slice_boxes , atol=1e-4 )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(f'Saving model {yolos_name} to {pytorch_dump_folder_path}' )
    model.save_pretrained(pytorch_dump_folder_path )
    print(f'Saving image processor to {pytorch_dump_folder_path}' )
    image_processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        model_mapping = {
            "yolos_ti": "yolos-tiny",
            "yolos_s_200_pre": "yolos-small",
            "yolos_s_300_pre": "yolos-small-300",
            "yolos_s_dWr": "yolos-small-dwr",
            "yolos_base": "yolos-base",
        }
        print("Pushing to the hub..." )
        model_name = model_mapping[yolos_name]
        image_processor.push_to_hub(model_name , organization="hustvl" )
        model.push_to_hub(model_name , organization="hustvl" )
if __name__ == "__main__":
_lowerCAmelCase : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--yolos_name""",
default="""yolos_s_200_pre""",
type=str,
help=(
"""Name of the YOLOS model you'd like to convert. Should be one of 'yolos_ti', 'yolos_s_200_pre',"""
""" 'yolos_s_300_pre', 'yolos_s_dWr', 'yolos_base'."""
),
)
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, help="""Path to the original state dict (.pth file)."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
_lowerCAmelCase : Union[str, Any] = parser.parse_args()
convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
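# Example invocation (script name and checkpoint path are illustrative; the original
# weights come from https://github.com/hustvl/YOLOS):
#
#   python convert_yolos_to_pytorch.py \
#       --yolos_name yolos_s_200_pre \
#       --checkpoint_path /path/to/yolos_s_200_pre.pth \
#       --pytorch_dump_folder_path ./yolos-small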
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_mluke"] = ["MLukeTokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mluke import MLukeTokenizer
else:
import sys
_lowerCAmelCase : List[str] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowerCAmelCase : Optional[int] = logging.get_logger(__name__)
_lowerCAmelCase : Any = {
"""google/mobilenet_v2_1.4_224""": """https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json""",
"""google/mobilenet_v2_1.0_224""": """https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json""",
"""google/mobilenet_v2_0.75_160""": """https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json""",
"""google/mobilenet_v2_0.35_96""": """https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json""",
# See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2
}
class MobileNetV2Config( PretrainedConfig ):
    model_type = "mobilenet_v2"

    def __init__( self , num_channels=3 , image_size=224 , depth_multiplier=1.0 , depth_divisible_by=8 , min_depth=8 , expand_ratio=6 , output_stride=32 , first_layer_is_expansion=True , finegrained_output=True , hidden_act="relu6" , tf_padding=True , classifier_dropout_prob=0.8 , initializer_range=0.02 , layer_norm_eps=0.001 , semantic_loss_ignore_index=255 , **kwargs , ):
        '''Instantiate a MobileNetV2 configuration; common arguments are handled by `PretrainedConfig`.'''
        super().__init__(**kwargs )
        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero." )
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.depth_divisible_by = depth_divisible_by
        self.min_depth = min_depth
        self.expand_ratio = expand_ratio
        self.output_stride = output_stride
        self.first_layer_is_expansion = first_layer_is_expansion
        self.finegrained_output = finegrained_output
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class MobileNetV2OnnxConfig( OnnxConfig ):
    torch_onnx_minimum_version = version.parse("1.11" )

    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict([("pixel_values", {0: "batch"})] )

    @property
    def outputs( self ) -> Mapping[str, Mapping[int, str]]:
        if self.task == "image-classification":
            return OrderedDict([("logits", {0: "batch"})] )
        else:
            return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})] )

    @property
    def atol_for_validation( self ) -> float:
        return 1e-4
"""simple docstring"""
import os
import re
import sys
import traceback
import warnings
from pathlib import Path
from typing import Dict, Optional, Union
from uuid import uuid4
from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami
from huggingface_hub.file_download import REGEX_COMMIT_HASH
from huggingface_hub.utils import (
EntryNotFoundError,
RepositoryNotFoundError,
RevisionNotFoundError,
is_jinja_available,
)
from packaging import version
from requests import HTTPError
from .. import __version__
from .constants import (
DEPRECATED_REVISION_ARGS,
DIFFUSERS_CACHE,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
SAFETENSORS_WEIGHTS_NAME,
WEIGHTS_NAME,
)
from .import_utils import (
ENV_VARS_TRUE_VALUES,
_flax_version,
_jax_version,
_onnxruntime_version,
_torch_version,
is_flax_available,
is_onnx_available,
is_torch_available,
)
from .logging import get_logger
logger = get_logger(__name__)

MODEL_CARD_TEMPLATE_PATH = Path(__file__).parent / "model_card_template.md"
SESSION_ID = uuid4().hex
HF_HUB_OFFLINE = os.getenv("HF_HUB_OFFLINE", "").upper() in ENV_VARS_TRUE_VALUES
DISABLE_TELEMETRY = os.getenv("DISABLE_TELEMETRY", "").upper() in ENV_VARS_TRUE_VALUES
HUGGINGFACE_CO_TELEMETRY = HUGGINGFACE_CO_RESOLVE_ENDPOINT + "/api/telemetry/"
def http_user_agent( user_agent : Union[Dict, str, None] = None )-> str:
    """Format a user-agent string containing basic information about the running environment."""
    ua = f'diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}'
    if DISABLE_TELEMETRY or HF_HUB_OFFLINE:
        return ua + "; telemetry/off"
    if is_torch_available():
        ua += f'; torch/{_torch_version}'
    if is_flax_available():
        ua += f'; jax/{_jax_version}'
        ua += f'; flax/{_flax_version}'
    if is_onnx_available():
        ua += f'; onnxruntime/{_onnxruntime_version}'
    # CI will set this value to True
    if os.environ.get("DIFFUSERS_IS_CI" , "" ).upper() in ENV_VARS_TRUE_VALUES:
        ua += "; is_ci/true"
    if isinstance(user_agent , dict ):
        ua += "; " + "; ".join(f'{k}/{v}' for k, v in user_agent.items() )
    elif isinstance(user_agent , str ):
        ua += "; " + user_agent
    return ua
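# Example result (all version numbers and the session id are illustrative):
#   "diffusers/0.15.0; python/3.10.6; session_id/19800222abcd...; torch/2.0.0"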
def get_full_repo_name( model_id : str , token : Optional[str] = None , organization : Optional[str] = None )-> str:
    """Prefix `model_id` with the user or organization namespace."""
    if token is None:
        token = HfFolder.get_token()
    if organization is None:
        username = whoami(token )["name"]
        return f'{username}/{model_id}'
    else:
        return f'{organization}/{model_id}'
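# Examples (names are illustrative):
#   get_full_repo_name("unet-demo", organization="my-org")  # -> "my-org/unet-demo"
#   get_full_repo_name("unet-demo")                         # -> "<hub username>/unet-demo"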
def create_model_card( args , model_name : str )-> None:
    """Render a model card from the Jinja template and save it as README.md in the output directory."""
    if not is_jinja_available():
        raise ValueError(
            "Modelcard rendering is based on Jinja templates."
            " Please make sure to have `jinja` installed before using `create_model_card`."
            " To install it, please run `pip install Jinja2`." )
    if hasattr(args , "local_rank" ) and args.local_rank not in [-1, 0]:
        return
    hub_token = args.hub_token if hasattr(args , "hub_token" ) else None
    repo_name = get_full_repo_name(model_name , token=hub_token )
    model_card = ModelCard.from_template(
        # Card metadata object that will be converted to YAML block
        card_data=ModelCardData(language="en" , license="apache-2.0" , library_name="diffusers" , tags=[] , datasets=args.dataset_name , metrics=[] ),
        template_path=MODEL_CARD_TEMPLATE_PATH,
        model_name=model_name,
        repo_name=repo_name,
        dataset_name=args.dataset_name if hasattr(args , "dataset_name" ) else None,
        learning_rate=args.learning_rate,
        train_batch_size=args.train_batch_size,
        eval_batch_size=args.eval_batch_size,
        gradient_accumulation_steps=(args.gradient_accumulation_steps if hasattr(args , "gradient_accumulation_steps" ) else None),
        adam_beta1=args.adam_beta1 if hasattr(args , "adam_beta1" ) else None,
        adam_beta2=args.adam_beta2 if hasattr(args , "adam_beta2" ) else None,
        adam_weight_decay=args.adam_weight_decay if hasattr(args , "adam_weight_decay" ) else None,
        adam_epsilon=args.adam_epsilon if hasattr(args , "adam_epsilon" ) else None,
        lr_scheduler=args.lr_scheduler if hasattr(args , "lr_scheduler" ) else None,
        lr_warmup_steps=args.lr_warmup_steps if hasattr(args , "lr_warmup_steps" ) else None,
        ema_inv_gamma=args.ema_inv_gamma if hasattr(args , "ema_inv_gamma" ) else None,
        ema_power=args.ema_power if hasattr(args , "ema_power" ) else None,
        ema_max_decay=args.ema_max_decay if hasattr(args , "ema_max_decay" ) else None,
        mixed_precision=args.mixed_precision,
    )
    card_path = os.path.join(args.output_dir , "README.md" )
    model_card.save(card_path )
def extract_commit_hash( resolved_file : Optional[str] , commit_hash : Optional[str] = None )-> Optional[str]:
    """Extract the commit hash from a resolved cache filename of the form `.../snapshots/<hash>/...`."""
    if resolved_file is None or commit_hash is not None:
        return commit_hash
    resolved_file = str(Path(resolved_file ).as_posix() )
    search = re.search(r"snapshots/([^/]+)/" , resolved_file )
    if search is None:
        return None
    commit_hash = search.groups()[0]
    return commit_hash if REGEX_COMMIT_HASH.match(commit_hash ) else None
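# Example (cache path and 40-character hash are illustrative):
#   extract_commit_hash(
#       "~/.cache/huggingface/diffusers/models--org--repo/snapshots/"
#       "ded79e214aa69e42c24d3f5ac14b76d568679cc2/unet/config.json"
#   )  # -> "ded79e214aa69e42c24d3f5ac14b76d568679cc2"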
# Old default cache path, potentially to be migrated.
# This logic was more or less taken from `transformers`, with the following differences:
# - Diffusers doesn't use custom environment variables to specify the cache path.
# - There is no need to migrate the cache format, just move the files to the new location.
hf_cache_home = os.path.expanduser(
    os.getenv("HF_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "huggingface"))
)
old_diffusers_cache = os.path.join(hf_cache_home, "diffusers")
def move_cache( old_cache_dir : Optional[str] = None , new_cache_dir : Optional[str] = None )-> None:
    """Move blob files from the pre-v0.14.0 diffusers cache to the new cache layout, leaving symlinks behind."""
    if new_cache_dir is None:
        new_cache_dir = DIFFUSERS_CACHE
    if old_cache_dir is None:
        old_cache_dir = old_diffusers_cache
    old_cache_dir = Path(old_cache_dir ).expanduser()
    new_cache_dir = Path(new_cache_dir ).expanduser()
    for old_blob_path in old_cache_dir.glob("**/blobs/*" ):
        if old_blob_path.is_file() and not old_blob_path.is_symlink():
            new_blob_path = new_cache_dir / old_blob_path.relative_to(old_cache_dir )
            new_blob_path.parent.mkdir(parents=True , exist_ok=True )
            os.replace(old_blob_path , new_blob_path )
            try:
                os.symlink(new_blob_path , old_blob_path )
            except OSError:
                logger.warning(
                    "Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded." )
    # At this point, old_cache_dir contains symlinks to the new cache (it can still be used).
cache_version_file = os.path.join(DIFFUSERS_CACHE, "version_diffusers_cache.txt")
if not os.path.isfile(cache_version_file):
    cache_version = 0
else:
    with open(cache_version_file) as f:
        try:
            cache_version = int(f.read())
        except ValueError:
            cache_version = 0
if cache_version < 1:
    old_cache_is_not_empty = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0
    if old_cache_is_not_empty:
logger.warning(
"""The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your """
"""existing cached models. This is a one-time operation, you can interrupt it or run it """
"""later by calling `diffusers.utils.hub_utils.move_cache()`."""
)
try:
move_cache()
except Exception as e:
            trace = "\n".join(traceback.format_tb(e.__traceback__))
logger.error(
F"""There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease """
"""file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole """
"""message and we will do our best to help."""
)
if cache_version < 1:
try:
os.makedirs(DIFFUSERS_CACHE, exist_ok=True)
with open(cache_version_file, """w""") as f:
f.write("""1""")
except Exception:
logger.warning(
F"""There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure """
"""the directory exists and can be written to."""
)
def _add_variant( weights_name : str , variant : Optional[str] = None )-> str:
    """Insert `variant` right before the file extension of `weights_name`."""
    if variant is not None:
        splits = weights_name.split("." )
        splits = splits[:-1] + [variant] + splits[-1:]
        weights_name = ".".join(splits )
    return weights_name
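# Examples:
#   _add_variant("diffusion_pytorch_model.bin", "fp16")  # -> "diffusion_pytorch_model.fp16.bin"
#   _add_variant("diffusion_pytorch_model.bin")          # -> "diffusion_pytorch_model.bin"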
def _get_model_file( pretrained_model_name_or_path , *,
    weights_name , subfolder , cache_dir , force_download , proxies , resume_download , local_files_only , use_auth_token , user_agent , revision , commit_hash=None , ):
    """Resolve `weights_name` either locally or from the Hub, handling the deprecated variant-as-revision loading."""
    pretrained_model_name_or_path = str(pretrained_model_name_or_path )
    if os.path.isfile(pretrained_model_name_or_path ):
        return pretrained_model_name_or_path
    elif os.path.isdir(pretrained_model_name_or_path ):
        if os.path.isfile(os.path.join(pretrained_model_name_or_path , weights_name ) ):
            # Load from a PyTorch checkpoint
            model_file = os.path.join(pretrained_model_name_or_path , weights_name )
            return model_file
        elif subfolder is not None and os.path.isfile(
            os.path.join(pretrained_model_name_or_path , subfolder , weights_name ) ):
            model_file = os.path.join(pretrained_model_name_or_path , subfolder , weights_name )
            return model_file
        else:
            raise EnvironmentError(
                f'Error no file named {weights_name} found in directory {pretrained_model_name_or_path}.' )
    else:
        # 1. First check if deprecated way of loading from branches is used
        if (
            revision in DEPRECATED_REVISION_ARGS
            and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME)
            and version.parse(version.parse(__version__ ).base_version ) >= version.parse("0.20.0" )
        ):
            try:
                model_file = hf_hub_download(
                    pretrained_model_name_or_path , filename=_add_variant(weights_name , revision ) , cache_dir=cache_dir , force_download=force_download , proxies=proxies , resume_download=resume_download , local_files_only=local_files_only , use_auth_token=use_auth_token , user_agent=user_agent , subfolder=subfolder , revision=revision or commit_hash , )
                warnings.warn(
                    f'Loading the variant {revision} from {pretrained_model_name_or_path} via `revision=\'{revision}\'` is deprecated. Loading instead from `revision=\'main\'` with `variant={revision}`. Loading model variants via `revision=\'{revision}\'` will be removed in diffusers v1. Please use `variant=\'{revision}\'` instead.' , FutureWarning , )
                return model_file
            except:  # noqa: E722
                warnings.warn(
                    f'You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision=\'{revision}\'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant=\'{revision}\'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(weights_name , revision )} file in the \'main\' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title \'{pretrained_model_name_or_path} is missing {_add_variant(weights_name , revision )}\' so that the correct variant file can be added.' , FutureWarning , )
        try:
            # 2. Load model file as usual
            model_file = hf_hub_download(
                pretrained_model_name_or_path , filename=weights_name , cache_dir=cache_dir , force_download=force_download , proxies=proxies , resume_download=resume_download , local_files_only=local_files_only , use_auth_token=use_auth_token , user_agent=user_agent , subfolder=subfolder , revision=revision or commit_hash , )
            return model_file
except RepositoryNotFoundError:
raise EnvironmentError(
f'{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier '
"listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a "
"token having permission to this repo with `use_auth_token` or log in with `huggingface-cli "
"login`." )
except RevisionNotFoundError:
raise EnvironmentError(
f'{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for '
"this model name. Check the model page at "
f'\'https://huggingface.co/{pretrained_model_name_or_path}\' for available revisions.' )
except EntryNotFoundError:
raise EnvironmentError(
f'{pretrained_model_name_or_path} does not appear to have a file named {weights_name}.' )
except HTTPError as err:
raise EnvironmentError(
f'There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}' )
except ValueError:
raise EnvironmentError(
f'We couldn\'t connect to \'{HUGGINGFACE_CO_RESOLVE_ENDPOINT}\' to load this model, couldn\'t find it'
f' in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a'
f' directory containing a file named {weights_name} or'
" \nCheckout your internet connection or see how to run the library in"
" offline mode at 'https://huggingface.co/docs/diffusers/installation#offline-mode'." )
except EnvironmentError:
raise EnvironmentError(
f'Can\'t load the model for \'{pretrained_model_name_or_path}\'. If you were trying to load it from '
"'https://huggingface.co/models', make sure you don't have a local directory with the same name. "
f'Otherwise, make sure \'{pretrained_model_name_or_path}\' is the correct path to a directory '
f'containing a file named {weights_name}' )
"""simple docstring"""
from datasets.utils.patching import _PatchedModuleObj, patch_submodule
from . import _test_patching
def SCREAMING_SNAKE_CASE__ ( )-> List[Any]:
'''simple docstring'''
import os as original_os
from os import path as original_path
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
UpperCAmelCase__ : Dict = "__test_patch_submodule_mock__"
with patch_submodule(_test_patching , "os.path.join" , snake_case ):
# Every way to access os.path.join must be patched, and the rest must stay untouched
# check os.path.join
assert isinstance(_test_patching.os , _PatchedModuleObj )
assert isinstance(_test_patching.os.path , _PatchedModuleObj )
assert _test_patching.os.path.join is mock
# check path.join
assert isinstance(_test_patching.path , _PatchedModuleObj )
assert _test_patching.path.join is mock
# check join
assert _test_patching.join is mock
# check that the other attributes are untouched
assert _test_patching.os.rename is original_rename
assert _test_patching.path.dirname is original_dirname
assert _test_patching.os.path.dirname is original_dirname
# Even renamed modules or objects must be patched
# check renamed_os.path.join
assert isinstance(_test_patching.renamed_os , _PatchedModuleObj )
assert isinstance(_test_patching.renamed_os.path , _PatchedModuleObj )
assert _test_patching.renamed_os.path.join is mock
# check renamed_path.join
assert isinstance(_test_patching.renamed_path , _PatchedModuleObj )
assert _test_patching.renamed_path.join is mock
# check renamed_join
assert _test_patching.renamed_join is mock
# check that the other attributes are untouched
assert _test_patching.renamed_os.rename is original_rename
assert _test_patching.renamed_path.dirname is original_dirname
assert _test_patching.renamed_os.path.dirname is original_dirname
# check that everthing is back to normal when the patch is over
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
def SCREAMING_SNAKE_CASE__ ( )-> Union[str, Any]:
'''simple docstring'''
assert _test_patching.open is open
UpperCAmelCase__ : Optional[int] = "__test_patch_submodule_builtin_mock__"
# _test_patching has "open" in its globals
assert _test_patching.open is open
with patch_submodule(_test_patching , "open" , snake_case ):
assert _test_patching.open is mock
# check that everthing is back to normal when the patch is over
assert _test_patching.open is open
def SCREAMING_SNAKE_CASE__ ( )-> Any:
'''simple docstring'''
UpperCAmelCase__ : Any = "__test_patch_submodule_missing_mock__"
with patch_submodule(_test_patching , "pandas.read_csv" , snake_case ):
pass
def SCREAMING_SNAKE_CASE__ ( )-> List[Any]:
'''simple docstring'''
UpperCAmelCase__ : Dict = "__test_patch_submodule_missing_builtin_mock__"
# _test_patching doesn't have "len" in its globals
assert getattr(_test_patching , "len" , snake_case ) is None
with patch_submodule(_test_patching , "len" , snake_case ):
assert _test_patching.len is mock
assert _test_patching.len is len
def SCREAMING_SNAKE_CASE__ ( )-> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase__ : Dict = "__test_patch_submodule_start_and_stop_mock__"
UpperCAmelCase__ : Union[str, Any] = patch_submodule(_test_patching , "open" , snake_case )
assert _test_patching.open is open
patch.start()
assert _test_patching.open is mock
patch.stop()
assert _test_patching.open is open
def SCREAMING_SNAKE_CASE__ ( )-> Optional[Any]:
'''simple docstring'''
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
UpperCAmelCase__ : List[Any] = "__test_patch_submodule_successive_join__"
UpperCAmelCase__ : Tuple = "__test_patch_submodule_successive_dirname__"
UpperCAmelCase__ : int = "__test_patch_submodule_successive_rename__"
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
with patch_submodule(_test_patching , "os.path.join" , snake_case ):
with patch_submodule(_test_patching , "os.rename" , snake_case ):
with patch_submodule(_test_patching , "os.path.dirname" , snake_case ):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
# try another order
with patch_submodule(_test_patching , "os.rename" , snake_case ):
with patch_submodule(_test_patching , "os.path.join" , snake_case ):
with patch_submodule(_test_patching , "os.path.dirname" , snake_case ):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
def SCREAMING_SNAKE_CASE__ ( )-> Any:
'''simple docstring'''
UpperCAmelCase__ : str = "__test_patch_submodule_doesnt_exist_mock__"
with patch_submodule(_test_patching , "__module_that_doesn_exist__.__attribute_that_doesn_exist__" , snake_case ):
pass
with patch_submodule(_test_patching , "os.__attribute_that_doesn_exist__" , snake_case ):
pass
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import tensorflow as tf
    from transformers import AutoTokenizer, TFAutoModelForSeq2SeqLM
@require_tf
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase__ ( unittest.TestCase ):
@slow
    def test_small_integration_test( self ):
        '''Regression test: the mean log-likelihood of "Hi I am" given "Hello there" must match the reference value.'''
        model = TFAutoModelForSeq2SeqLM.from_pretrained("google/mt5-small" )
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small" )
        input_ids = tokenizer("Hello there" , return_tensors="tf" ).input_ids
        labels = tokenizer("Hi I am" , return_tensors="tf" ).input_ids
        loss = model(input_ids , labels=labels ).loss
        mtf_score = -tf.math.reduce_mean(loss ).numpy()
        EXPECTED_SCORE = -21.228168
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 2e-4 )
"""simple docstring"""
import functools
def mincost_tickets( days : list[int] , costs : list[int] )-> int:
    """Return the minimum cost of passes so that travel is possible on every day in `days`,
    given the prices of a 1-day, 7-day and 30-day pass in `costs`."""
    if not isinstance(days , list ) or not all(isinstance(day , int ) for day in days ):
        raise ValueError("The parameter days should be a list of integers" )
    if len(costs ) != 3 or not all(isinstance(cost , int ) for cost in costs ):
        raise ValueError("The parameter costs should be a list of three integers" )
    if len(days ) == 0:
        return 0
    if min(days ) <= 0:
        raise ValueError("All days elements should be greater than 0" )
    if max(days ) >= 366:
        raise ValueError("All days elements should be less than 366" )
    days_set = set(days )

    @functools.cache
    def dynamic_programming(index : int ) -> int:
        if index > 365:
            return 0
        if index not in days_set:
            return dynamic_programming(index + 1 )
        return min(
            costs[0] + dynamic_programming(index + 1 ) , costs[1] + dynamic_programming(index + 7 ) , costs[2] + dynamic_programming(index + 30 ) , )

    return dynamic_programming(1 )
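# Example: with days=[1, 4, 6, 7, 8, 20] and costs=[2, 7, 15] the optimum is a 1-day
# pass on day 1, a 7-day pass covering days 4-10 and another 1-day pass on day 20,
# so mincost_tickets([1, 4, 6, 7, 8, 20], [2, 7, 15]) == 2 + 7 + 2 == 11.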
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class lowerCAmelCase__ :
def __init__( self : Optional[Any] , snake_case__ : Optional[Any] , snake_case__ : Dict=1_3 , snake_case__ : List[str]=7 , snake_case__ : Union[str, Any]=True , snake_case__ : Tuple=True , snake_case__ : Optional[int]=True , snake_case__ : Any=True , snake_case__ : Any=9_9 , snake_case__ : List[Any]=1_6 , snake_case__ : Any=3_6 , snake_case__ : Union[str, Any]=6 , snake_case__ : Tuple=6 , snake_case__ : List[str]=6 , snake_case__ : List[str]=3_7 , snake_case__ : Dict="gelu" , snake_case__ : int=0.1 , snake_case__ : List[Any]=0.1 , snake_case__ : List[str]=5_1_2 , snake_case__ : Dict=1_6 , snake_case__ : str=2 , snake_case__ : Optional[Any]=0.02 , snake_case__ : List[str]=3 , snake_case__ : Any=4 , snake_case__ : int=None , ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = parent
UpperCAmelCase__ : int = batch_size
UpperCAmelCase__ : int = seq_length
UpperCAmelCase__ : List[str] = is_training
UpperCAmelCase__ : Union[str, Any] = use_input_mask
UpperCAmelCase__ : Optional[Any] = use_token_type_ids
UpperCAmelCase__ : Any = use_labels
UpperCAmelCase__ : List[Any] = vocab_size
UpperCAmelCase__ : Any = embedding_size
UpperCAmelCase__ : List[str] = hidden_size
UpperCAmelCase__ : List[Any] = num_hidden_layers
UpperCAmelCase__ : int = num_hidden_groups
UpperCAmelCase__ : Union[str, Any] = num_attention_heads
UpperCAmelCase__ : List[str] = intermediate_size
UpperCAmelCase__ : Optional[Any] = hidden_act
UpperCAmelCase__ : List[Any] = hidden_dropout_prob
UpperCAmelCase__ : Tuple = attention_probs_dropout_prob
UpperCAmelCase__ : str = max_position_embeddings
UpperCAmelCase__ : Any = type_vocab_size
UpperCAmelCase__ : Union[str, Any] = type_sequence_label_size
UpperCAmelCase__ : Union[str, Any] = initializer_range
UpperCAmelCase__ : Tuple = num_labels
UpperCAmelCase__ : List[str] = num_choices
UpperCAmelCase__ : Union[str, Any] = scope
def __a ( self : int ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase__ : Optional[int] = None
if self.use_input_mask:
UpperCAmelCase__ : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase__ : Optional[int] = None
if self.use_token_type_ids:
UpperCAmelCase__ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCAmelCase__ : List[Any] = None
UpperCAmelCase__ : Optional[int] = None
UpperCAmelCase__ : Any = None
if self.use_labels:
UpperCAmelCase__ : int = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase__ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCAmelCase__ : Dict = ids_tensor([self.batch_size] , self.num_choices )
UpperCAmelCase__ : int = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __a ( self : Any ):
'''simple docstring'''
return AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , )
def __a ( self : List[Any] , snake_case__ : Optional[Any] , snake_case__ : List[Any] , snake_case__ : Optional[Any] , snake_case__ : Tuple , snake_case__ : List[str] , snake_case__ : List[Any] , snake_case__ : List[str] ):
'''simple docstring'''
UpperCAmelCase__ : str = AlbertModel(config=snake_case__ )
model.to(snake_case__ )
model.eval()
UpperCAmelCase__ : Union[str, Any] = model(snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ )
UpperCAmelCase__ : Optional[Any] = model(snake_case__ , token_type_ids=snake_case__ )
UpperCAmelCase__ : Optional[int] = model(snake_case__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def __a ( self : Dict , snake_case__ : List[Any] , snake_case__ : Union[str, Any] , snake_case__ : List[str] , snake_case__ : int , snake_case__ : List[Any] , snake_case__ : str , snake_case__ : int ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = AlbertForPreTraining(config=snake_case__ )
model.to(snake_case__ )
model.eval()
UpperCAmelCase__ : Dict = model(
snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ , sentence_order_label=snake_case__ , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.sop_logits.shape , (self.batch_size, config.num_labels) )
def __a ( self : Union[str, Any] , snake_case__ : Any , snake_case__ : List[str] , snake_case__ : List[str] , snake_case__ : Union[str, Any] , snake_case__ : List[Any] , snake_case__ : Optional[Any] , snake_case__ : List[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = AlbertForMaskedLM(config=snake_case__ )
model.to(snake_case__ )
model.eval()
UpperCAmelCase__ : Union[str, Any] = model(snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __a ( self : Optional[int] , snake_case__ : Optional[int] , snake_case__ : Optional[Any] , snake_case__ : List[str] , snake_case__ : Any , snake_case__ : Optional[Any] , snake_case__ : List[str] , snake_case__ : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = AlbertForQuestionAnswering(config=snake_case__ )
model.to(snake_case__ )
model.eval()
UpperCAmelCase__ : List[str] = model(
snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , start_positions=snake_case__ , end_positions=snake_case__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __a ( self : Dict , snake_case__ : Optional[Any] , snake_case__ : List[str] , snake_case__ : Optional[int] , snake_case__ : Any , snake_case__ : Any , snake_case__ : List[str] , snake_case__ : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = self.num_labels
UpperCAmelCase__ : int = AlbertForSequenceClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
UpperCAmelCase__ : int = model(snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __a ( self : str , snake_case__ : Optional[Any] , snake_case__ : str , snake_case__ : Optional[Any] , snake_case__ : Union[str, Any] , snake_case__ : Optional[int] , snake_case__ : Dict , snake_case__ : Tuple ):
'''simple docstring'''
UpperCAmelCase__ : str = self.num_labels
UpperCAmelCase__ : Any = AlbertForTokenClassification(config=snake_case__ )
model.to(snake_case__ )
model.eval()
UpperCAmelCase__ : List[str] = model(snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __a ( self : Any , snake_case__ : Optional[Any] , snake_case__ : Dict , snake_case__ : Dict , snake_case__ : int , snake_case__ : List[str] , snake_case__ : Union[str, Any] , snake_case__ : Dict ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = self.num_choices
UpperCAmelCase__ : Optional[Any] = AlbertForMultipleChoice(config=snake_case__ )
model.to(snake_case__ )
model.eval()
UpperCAmelCase__ : int = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCAmelCase__ : Any = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCAmelCase__ : Optional[Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCAmelCase__ : Tuple = model(
snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common( self ):
        '''Bundle the config and a minimal inputs dict for the common model tests.'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class lowerCAmelCase__ ( __magic_name__ , __magic_name__ , unittest.TestCase ):
SCREAMING_SNAKE_CASE_ =(
(
AlbertModel,
AlbertForPreTraining,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertForQuestionAnswering,
)
if is_torch_available()
else ()
)
SCREAMING_SNAKE_CASE_ =(
{
'''feature-extraction''': AlbertModel,
'''fill-mask''': AlbertForMaskedLM,
'''question-answering''': AlbertForQuestionAnswering,
'''text-classification''': AlbertForSequenceClassification,
'''token-classification''': AlbertForTokenClassification,
'''zero-shot''': AlbertForSequenceClassification,
}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE_ =True
def __a ( self : Tuple , snake_case__ : int , snake_case__ : Tuple , snake_case__ : Optional[int]=False ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = super()._prepare_for_class(snake_case__ , snake_case__ , return_labels=snake_case__ )
if return_labels:
if model_class in get_values(snake_case__ ):
UpperCAmelCase__ : List[Any] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=snake_case__ )
UpperCAmelCase__ : int = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=snake_case__ )
return inputs_dict
def __a ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase__ : Dict = AlbertModelTester(self )
UpperCAmelCase__ : Any = ConfigTester(self , config_class=snake_case__ , hidden_size=3_7 )
def __a ( self : Dict ):
'''simple docstring'''
self.config_tester.run_common_tests()
def __a ( self : str ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case__ )
def __a ( self : Dict ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*snake_case__ )
def __a ( self : str ):
'''simple docstring'''
UpperCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*snake_case__ )
def __a ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*snake_case__ )
def __a ( self : Dict ):
'''simple docstring'''
UpperCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*snake_case__ )
def __a ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*snake_case__ )
def __a ( self : List[Any] ):
'''simple docstring'''
UpperCAmelCase__ : str = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
UpperCAmelCase__ : Dict = type
self.model_tester.create_and_check_model(*snake_case__ )
@slow
def __a ( self : Union[str, Any] ):
'''simple docstring'''
for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase__ : List[str] = AlbertModel.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
@require_torch
class lowerCAmelCase__ ( unittest.TestCase ):
@slow
    def test_inference_no_head_absolute_embedding( self ):
        '''Compare the base ALBERT model's output against a hard-coded reference slice.'''
        model = AlbertModel.from_pretrained("albert-base-v2" )
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
        with torch.no_grad():
            output = model(input_ids , attention_mask=attention_mask )[0]
        expected_shape = torch.Size((1, 11, 768) )
        self.assertEqual(output.shape , expected_shape )
        expected_slice = torch.tensor(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]] )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , expected_slice , atol=1e-4 ) )
"""simple docstring"""
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, Features, Value
from .base import TaskTemplate
@dataclass(frozen=True )
class AutomaticSpeechRecognition( TaskTemplate ):
    task: str = field(default="automatic-speech-recognition" , metadata={"include_in_asdict_even_if_is_default": True} )
    input_schema: ClassVar[Features] = Features({"audio": Audio()} )
    label_schema: ClassVar[Features] = Features({"transcription": Value("string" )} )
    audio_column: str = "audio"
    transcription_column: str = "transcription"

    def align_with_features( self , features ):
        '''Return a copy of this template whose input schema uses the dataset's own Audio feature.'''
        if self.audio_column not in features:
            raise ValueError(f'Column {self.audio_column} is not present in features.' )
        if not isinstance(features[self.audio_column] , Audio ):
            raise ValueError(f'Column {self.audio_column} is not an Audio type.' )
        task_template = copy.deepcopy(self )
        input_schema = self.input_schema.copy()
        input_schema["audio"] = features[self.audio_column]
        # frozen dataclass: assign through __dict__ to bypass the blocked __setattr__
        task_template.__dict__["input_schema"] = input_schema
        return task_template

    @property
    def column_mapping( self ):
        '''Map dataset column names to the canonical task column names.'''
        return {self.audio_column: "audio", self.transcription_column: "transcription"}
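# Minimal usage sketch (the Features below are illustrative; `align_with_features`
# is the method defined above):
#
#   feats = Features({"audio": Audio(sampling_rate=16_000), "transcription": Value("string")})
#   task = AutomaticSpeechRecognition().align_with_features(feats)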
"""simple docstring"""
def kth_permutation( k : int , n : int )-> list[int]:
    """Return the k-th (0-indexed) lexicographic permutation of range(n)."""
    factorials = [1]
    for i in range(2 , n ):
        factorials.append(factorials[-1] * i )
    assert 0 <= k < factorials[-1] * n, "k out of bounds"
    permutation = []
    elements = list(range(n ) )
    # Find permutation
    while factorials:
        factorial = factorials.pop()
        number, k = divmod(k , factorial )
        permutation.append(elements[number] )
        elements.remove(elements[number] )
    permutation.append(elements[0] )
    return permutation
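# Example (k is 0-indexed): the permutations of range(3) in lexicographic order are
# [0, 1, 2], [0, 2, 1], [1, 0, 2], ... so:
#   kth_permutation(1, 3)  # -> [0, 2, 1]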
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ..models.auto import AutoModelForVision2Seq
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class ImageCaptioningTool( PipelineTool ):
    default_checkpoint = "Salesforce/blip-image-captioning-base"
    description = (
        "This is a tool that generates a description of an image. It takes an input named `image` which should be the "
        "image to caption, and returns a text that contains the description in English."
    )
    name = "image_captioner"
    model_class = AutoModelForVision2Seq

    inputs = ["image"]
    outputs = ["text"]

    def __init__( self , *args , **kwargs ):
        '''Fail early if the vision backend (PIL) is unavailable.'''
        requires_backends(self , ["vision"] )
        super().__init__(*args , **kwargs )

    def encode( self , image : "Image" ):
        '''Turn the input image into model-ready pixel values.'''
        return self.pre_processor(images=image , return_tensors="pt" )

    def forward( self , inputs ):
        '''Generate caption token ids.'''
        return self.model.generate(**inputs )

    def decode( self , outputs ):
        '''Decode the generated ids into a plain-text caption.'''
        return self.pre_processor.batch_decode(outputs , skip_special_tokens=True )[0].strip()
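# Minimal usage sketch (assumes PIL is installed and "photo.jpg" exists locally;
# `PipelineTool.__call__` is expected to chain encode -> forward -> decode):
#
#   from PIL import Image
#   tool = ImageCaptioningTool()
#   print(tool(Image.open("photo.jpg")))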
"""simple docstring"""
import unittest
import numpy as np
import requests
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
_lowerCAmelCase : Union[str, Any] = False
if is_vision_available():
from PIL import Image
from transformers import PixaStructImageProcessor
class PixaStructImageProcessingTester( unittest.TestCase ):
    def __init__( self , parent , batch_size=7 , num_channels=3 , image_size=18 , min_resolution=30 , max_resolution=400 , size=None , do_normalize=True , do_convert_rgb=True , patch_size=None , ):
        '''Hold the hyperparameters shared by the image-processing tests below.'''
        size = size if size is not None else {"height": 20, "width": 20}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.size = size
        self.do_normalize = do_normalize
        self.do_convert_rgb = do_convert_rgb
        self.max_patches = [512, 1024, 2048, 4096]
        self.patch_size = patch_size if patch_size is not None else {"height": 16, "width": 16}
    def prepare_image_processor_dict( self ):
        '''Return the kwargs used to instantiate the image processor under test.'''
        return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}

    def prepare_dummy_image( self ):
        '''Download a fixed test image used for the deterministic patch-mean check.'''
        url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg"
        raw_image = Image.open(requests.get(url , stream=True ).raw ).convert("RGB" )
        return raw_image
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason='''`Pix2StructImageProcessor` requires `torch>=1.11.0`.''' , )
@require_torch
@require_vision
class lowerCAmelCase__ ( __magic_name__ , unittest.TestCase ):
SCREAMING_SNAKE_CASE_ =PixaStructImageProcessor if is_vision_available() else None
def __a ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase__ : int = PixaStructImageProcessingTester(self )
@property
def __a ( self : Optional[int] ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def __a ( self : str ):
'''simple docstring'''
UpperCAmelCase__ : int = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(snake_case__ , "do_normalize" ) )
self.assertTrue(hasattr(snake_case__ , "do_convert_rgb" ) )
def __a ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = self.image_processor_tester.prepare_dummy_image()
UpperCAmelCase__ : Any = self.image_processing_class(**self.image_processor_dict )
UpperCAmelCase__ : Dict = 2_0_4_8
UpperCAmelCase__ : int = image_processor(snake_case__ , return_tensors="pt" , max_patches=snake_case__ )
self.assertTrue(torch.allclose(inputs.flattened_patches.mean() , torch.tensor(0.0606 ) , atol=1e-3 , rtol=1e-3 ) )
def __a ( self : List[Any] ):
'''simple docstring'''
# Initialize image_processor
UpperCAmelCase__ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCAmelCase__ : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case__ )
for image in image_inputs:
self.assertIsInstance(snake_case__ , Image.Image )
# Test not batched input
UpperCAmelCase__ : int = (
(self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
UpperCAmelCase__ : List[Any] = image_processor(
image_inputs[0] , return_tensors="pt" , max_patches=snake_case__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
UpperCAmelCase__ : str = image_processor(
snake_case__ , return_tensors="pt" , max_patches=snake_case__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def __a ( self : List[Any] ):
'''simple docstring'''
# Initialize image_processor
UpperCAmelCase__ : str = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCAmelCase__ : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case__ )
for image in image_inputs:
self.assertIsInstance(snake_case__ , Image.Image )
# Test not batched input
UpperCAmelCase__ : int = (
(self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
* self.image_processor_tester.num_channels
) + 2
UpperCAmelCase__ : Optional[int] = True
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
with self.assertRaises(snake_case__ ):
UpperCAmelCase__ : List[Any] = image_processor(
image_inputs[0] , return_tensors="pt" , max_patches=snake_case__ ).flattened_patches
UpperCAmelCase__ : Optional[Any] = "Hello"
UpperCAmelCase__ : int = image_processor(
image_inputs[0] , return_tensors="pt" , max_patches=snake_case__ , header_text=snake_case__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
UpperCAmelCase__ : Dict = image_processor(
snake_case__ , return_tensors="pt" , max_patches=snake_case__ , header_text=snake_case__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def __a ( self : Dict ):
'''simple docstring'''
# Initialize image_processor
UpperCAmelCase__ : Any = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCAmelCase__ : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case__ , numpify=snake_case__ )
for image in image_inputs:
self.assertIsInstance(snake_case__ , np.ndarray )
UpperCAmelCase__ : int = (
(self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
UpperCAmelCase__ : Dict = image_processor(
image_inputs[0] , return_tensors="pt" , max_patches=snake_case__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
UpperCAmelCase__ : List[str] = image_processor(
snake_case__ , return_tensors="pt" , max_patches=snake_case__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def __a ( self : Optional[int] ):
'''simple docstring'''
# Initialize image_processor
UpperCAmelCase__ : int = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCAmelCase__ : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case__ , torchify=snake_case__ )
for image in image_inputs:
self.assertIsInstance(snake_case__ , torch.Tensor )
# Test not batched input
UpperCAmelCase__ : int = (
(self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
UpperCAmelCase__ : int = image_processor(
image_inputs[0] , return_tensors="pt" , max_patches=snake_case__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
UpperCAmelCase__ : str = image_processor(
snake_case__ , return_tensors="pt" , max_patches=snake_case__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason='''`Pix2StructImageProcessor` requires `torch>=1.11.0`.''' , )
@require_torch
@require_vision
class lowerCAmelCase__ ( __magic_name__ , unittest.TestCase ):
SCREAMING_SNAKE_CASE_ =PixaStructImageProcessor if is_vision_available() else None
def __a ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = PixaStructImageProcessingTester(self , num_channels=4 )
UpperCAmelCase__ : Optional[int] = 3
@property
def __a ( self : int ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def __a ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(snake_case__ , "do_normalize" ) )
self.assertTrue(hasattr(snake_case__ , "do_convert_rgb" ) )
def __a ( self : int ):
'''simple docstring'''
# Initialize image_processor
UpperCAmelCase__ : str = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCAmelCase__ : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case__ )
for image in image_inputs:
self.assertIsInstance(snake_case__ , Image.Image )
# Test not batched input
UpperCAmelCase__ : str = (
(self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
* (self.image_processor_tester.num_channels - 1)
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
UpperCAmelCase__ : Optional[int] = image_processor(
image_inputs[0] , return_tensors="pt" , max_patches=snake_case__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
UpperCAmelCase__ : Dict = image_processor(
snake_case__ , return_tensors="pt" , max_patches=snake_case__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_download, hf_hub_url
from PIL import Image
from transformers import DetaConfig, DetaForObjectDetection, DetaImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCAmelCase : Tuple = logging.get_logger(__name__)
def SCREAMING_SNAKE_CASE__ ( snake_case : Optional[int] )-> Optional[int]:
'''simple docstring'''
UpperCAmelCase__ : List[Any] = SwinConfig(
embed_dim=192 , depths=(2, 2, 18, 2) , num_heads=(6, 12, 24, 48) , window_size=12 , out_features=["stage2", "stage3", "stage4"] , )
UpperCAmelCase__ : Any = DetaConfig(
backbone_config=snake_case , num_queries=900 , encoder_ffn_dim=2048 , decoder_ffn_dim=2048 , num_feature_levels=5 , assign_first_stage=snake_case , with_box_refine=snake_case , two_stage=snake_case , )
# set labels
UpperCAmelCase__ : List[Any] = "huggingface/label-files"
if "o365" in model_name:
UpperCAmelCase__ : Dict = 366
UpperCAmelCase__ : List[Any] = "object365-id2label.json"
else:
UpperCAmelCase__ : Any = 91
UpperCAmelCase__ : Optional[Any] = "coco-detection-id2label.json"
UpperCAmelCase__ : Union[str, Any] = num_labels
UpperCAmelCase__ : Any = json.load(open(cached_download(hf_hub_url(snake_case , snake_case , repo_type="dataset" ) ) , "r" ) )
UpperCAmelCase__ : List[Any] = {int(k ): v for k, v in idalabel.items()}
UpperCAmelCase__ : Union[str, Any] = idalabel
UpperCAmelCase__ : List[str] = {v: k for k, v in idalabel.items()}
return config
def SCREAMING_SNAKE_CASE__ ( snake_case : Tuple )-> Any:
'''simple docstring'''
UpperCAmelCase__ : Any = []
# stem
# fmt: off
rename_keys.append(("backbone.0.body.patch_embed.proj.weight", "model.backbone.model.embeddings.patch_embeddings.projection.weight") )
rename_keys.append(("backbone.0.body.patch_embed.proj.bias", "model.backbone.model.embeddings.patch_embeddings.projection.bias") )
rename_keys.append(("backbone.0.body.patch_embed.norm.weight", "model.backbone.model.embeddings.norm.weight") )
rename_keys.append(("backbone.0.body.patch_embed.norm.bias", "model.backbone.model.embeddings.norm.bias") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.norm1.weight', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.norm1.bias', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_bias_table', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_index', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.attn.proj.weight', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.attn.proj.bias', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.norm2.weight', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.norm2.bias', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.weight', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.bias', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.weight', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.bias', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.bias') )
if i < 3:
rename_keys.append((f'backbone.0.body.layers.{i}.downsample.reduction.weight', f'model.backbone.model.encoder.layers.{i}.downsample.reduction.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.downsample.norm.weight', f'model.backbone.model.encoder.layers.{i}.downsample.norm.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.downsample.norm.bias', f'model.backbone.model.encoder.layers.{i}.downsample.norm.bias') )
rename_keys.append(("backbone.0.body.norm1.weight", "model.backbone.model.hidden_states_norms.stage2.weight") )
rename_keys.append(("backbone.0.body.norm1.bias", "model.backbone.model.hidden_states_norms.stage2.bias") )
rename_keys.append(("backbone.0.body.norm2.weight", "model.backbone.model.hidden_states_norms.stage3.weight") )
rename_keys.append(("backbone.0.body.norm2.bias", "model.backbone.model.hidden_states_norms.stage3.bias") )
rename_keys.append(("backbone.0.body.norm3.weight", "model.backbone.model.hidden_states_norms.stage4.weight") )
rename_keys.append(("backbone.0.body.norm3.bias", "model.backbone.model.hidden_states_norms.stage4.bias") )
# transformer encoder
for i in range(config.encoder_layers ):
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.sampling_offsets.weight', f'model.encoder.layers.{i}.self_attn.sampling_offsets.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.sampling_offsets.bias', f'model.encoder.layers.{i}.self_attn.sampling_offsets.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.attention_weights.weight', f'model.encoder.layers.{i}.self_attn.attention_weights.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.attention_weights.bias', f'model.encoder.layers.{i}.self_attn.attention_weights.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.value_proj.weight', f'model.encoder.layers.{i}.self_attn.value_proj.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.value_proj.bias', f'model.encoder.layers.{i}.self_attn.value_proj.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.output_proj.weight', f'model.encoder.layers.{i}.self_attn.output_proj.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.output_proj.bias', f'model.encoder.layers.{i}.self_attn.output_proj.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.norm1.weight', f'model.encoder.layers.{i}.self_attn_layer_norm.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.norm1.bias', f'model.encoder.layers.{i}.self_attn_layer_norm.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.linear1.weight', f'model.encoder.layers.{i}.fc1.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.linear1.bias', f'model.encoder.layers.{i}.fc1.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.linear2.weight', f'model.encoder.layers.{i}.fc2.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.linear2.bias', f'model.encoder.layers.{i}.fc2.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.norm2.weight', f'model.encoder.layers.{i}.final_layer_norm.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.norm2.bias', f'model.encoder.layers.{i}.final_layer_norm.bias') )
# transformer decoder
for i in range(config.decoder_layers ):
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.sampling_offsets.weight', f'model.decoder.layers.{i}.encoder_attn.sampling_offsets.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.sampling_offsets.bias', f'model.decoder.layers.{i}.encoder_attn.sampling_offsets.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.attention_weights.weight', f'model.decoder.layers.{i}.encoder_attn.attention_weights.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.attention_weights.bias', f'model.decoder.layers.{i}.encoder_attn.attention_weights.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.value_proj.weight', f'model.decoder.layers.{i}.encoder_attn.value_proj.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.value_proj.bias', f'model.decoder.layers.{i}.encoder_attn.value_proj.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.output_proj.weight', f'model.decoder.layers.{i}.encoder_attn.output_proj.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.output_proj.bias', f'model.decoder.layers.{i}.encoder_attn.output_proj.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.norm1.weight', f'model.decoder.layers.{i}.encoder_attn_layer_norm.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.norm1.bias', f'model.decoder.layers.{i}.encoder_attn_layer_norm.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.self_attn.out_proj.weight', f'model.decoder.layers.{i}.self_attn.out_proj.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.self_attn.out_proj.bias', f'model.decoder.layers.{i}.self_attn.out_proj.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.norm2.weight', f'model.decoder.layers.{i}.self_attn_layer_norm.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.norm2.bias', f'model.decoder.layers.{i}.self_attn_layer_norm.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.linear1.weight', f'model.decoder.layers.{i}.fc1.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.linear1.bias', f'model.decoder.layers.{i}.fc1.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.linear2.weight', f'model.decoder.layers.{i}.fc2.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.linear2.bias', f'model.decoder.layers.{i}.fc2.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.norm3.weight', f'model.decoder.layers.{i}.final_layer_norm.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.norm3.bias', f'model.decoder.layers.{i}.final_layer_norm.bias') )
# fmt: on
return rename_keys
def SCREAMING_SNAKE_CASE__ ( snake_case : List[Any] , snake_case : Dict , snake_case : List[Any] )-> int:
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = dct.pop(snake_case )
UpperCAmelCase__ : Optional[int] = val
def SCREAMING_SNAKE_CASE__ ( snake_case : Optional[int] , snake_case : Optional[int] )-> int:
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
UpperCAmelCase__ : List[Any] = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
UpperCAmelCase__ : str = state_dict.pop(f'backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.weight' )
UpperCAmelCase__ : Union[str, Any] = state_dict.pop(f'backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.bias' )
# next, add query, keys and values (in that order) to the state dict
UpperCAmelCase__ : List[Any] = in_proj_weight[:dim, :]
UpperCAmelCase__ : str = in_proj_bias[: dim]
UpperCAmelCase__ : Any = in_proj_weight[
dim : dim * 2, :
]
UpperCAmelCase__ : str = in_proj_bias[
dim : dim * 2
]
UpperCAmelCase__ : int = in_proj_weight[
-dim :, :
]
UpperCAmelCase__ : List[str] = in_proj_bias[-dim :]
# fmt: on
def SCREAMING_SNAKE_CASE__ ( snake_case : str , snake_case : Tuple )-> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase__ : List[str] = config.d_model
for i in range(config.decoder_layers ):
# read in weights + bias of input projection layer of self-attention
UpperCAmelCase__ : int = state_dict.pop(f'transformer.decoder.layers.{i}.self_attn.in_proj_weight' )
UpperCAmelCase__ : Any = state_dict.pop(f'transformer.decoder.layers.{i}.self_attn.in_proj_bias' )
# next, add query, keys and values (in that order) to the state dict
UpperCAmelCase__ : str = in_proj_weight[:hidden_size, :]
UpperCAmelCase__ : Any = in_proj_bias[:hidden_size]
UpperCAmelCase__ : List[str] = in_proj_weight[
hidden_size : hidden_size * 2, :
]
UpperCAmelCase__ : Optional[int] = in_proj_bias[hidden_size : hidden_size * 2]
UpperCAmelCase__ : Tuple = in_proj_weight[-hidden_size:, :]
UpperCAmelCase__ : Optional[int] = in_proj_bias[-hidden_size:]
def SCREAMING_SNAKE_CASE__ ( )-> Any:
'''simple docstring'''
UpperCAmelCase__ : str = "http://images.cocodataset.org/val2017/000000039769.jpg"
UpperCAmelCase__ : List[Any] = Image.open(requests.get(url , stream=True ).raw )
return im
@torch.no_grad()
def SCREAMING_SNAKE_CASE__ ( snake_case : str , snake_case : Optional[int] , snake_case : List[str] )-> List[Any]:
'''simple docstring'''
UpperCAmelCase__ : str = get_deta_config(snake_case )
# load original state dict
if model_name == "deta-swin-large":
UpperCAmelCase__ : Union[str, Any] = hf_hub_download(repo_id="nielsr/deta-checkpoints" , filename="adet_swin_ft.pth" )
elif model_name == "deta-swin-large-o365":
UpperCAmelCase__ : int = hf_hub_download(repo_id="jozhang97/deta-swin-l-o365" , filename="deta_swin_pt_o365.pth" )
else:
raise ValueError(f'Model name {model_name} not supported' )
UpperCAmelCase__ : Any = torch.load(snake_case , map_location="cpu" )["model"]
# original state dict
for name, param in state_dict.items():
print(name , param.shape )
# rename keys
UpperCAmelCase__ : List[str] = create_rename_keys(snake_case )
for src, dest in rename_keys:
rename_key(state_dict , src , dest )
read_in_swin_q_k_v(state_dict , config.backbone_config )
read_in_decoder_q_k_v(state_dict , config )
# fix some prefixes
for key in state_dict.copy().keys():
if "transformer.decoder.class_embed" in key or "transformer.decoder.bbox_embed" in key:
UpperCAmelCase__ : Union[str, Any] = state_dict.pop(snake_case )
UpperCAmelCase__ : Any = val
if "input_proj" in key:
UpperCAmelCase__ : Dict = state_dict.pop(snake_case )
UpperCAmelCase__ : Dict = val
if "level_embed" in key or "pos_trans" in key or "pix_trans" in key or "enc_output" in key:
UpperCAmelCase__ : Tuple = state_dict.pop(snake_case )
UpperCAmelCase__ : int = val
# finally, create HuggingFace model and load state dict
UpperCAmelCase__ : Optional[int] = DetaForObjectDetection(snake_case )
model.load_state_dict(snake_case )
model.eval()
UpperCAmelCase__ : int = "cuda" if torch.cuda.is_available() else "cpu"
model.to(snake_case )
# load image processor
UpperCAmelCase__ : Tuple = DetaImageProcessor(format="coco_detection" )
# verify our conversion on image
UpperCAmelCase__ : Any = prepare_img()
UpperCAmelCase__ : Dict = processor(images=snake_case , return_tensors="pt" )
UpperCAmelCase__ : Tuple = encoding["pixel_values"]
UpperCAmelCase__ : Any = model(pixel_values.to(snake_case ) )
# verify logits
print("Logits:" , outputs.logits[0, :3, :3] )
print("Boxes:" , outputs.pred_boxes[0, :3, :3] )
if model_name == "deta-swin-large":
UpperCAmelCase__ : str = torch.tensor(
[[-7.6308, -2.8485, -5.3737], [-7.2037, -4.5505, -4.8027], [-7.2943, -4.2611, -4.6617]] )
UpperCAmelCase__ : Optional[int] = torch.tensor([[0.4987, 0.4969, 0.9999], [0.2549, 0.5498, 0.4805], [0.5498, 0.2757, 0.0569]] )
elif model_name == "deta-swin-large-o365":
UpperCAmelCase__ : Optional[int] = torch.tensor(
[[-8.0122, -3.5720, -4.9717], [-8.1547, -3.6886, -4.6389], [-7.6610, -3.6194, -5.0134]] )
UpperCAmelCase__ : Optional[int] = torch.tensor([[0.2523, 0.5549, 0.4881], [0.7715, 0.4149, 0.4601], [0.5503, 0.2753, 0.0575]] )
assert torch.allclose(outputs.logits[0, :3, :3] , expected_logits.to(snake_case ) , atol=1E-4 )
assert torch.allclose(outputs.pred_boxes[0, :3, :3] , expected_boxes.to(snake_case ) , atol=1E-4 )
print("Everything ok!" )
if pytorch_dump_folder_path:
# Save model and processor
logger.info(f'Saving PyTorch model and processor to {pytorch_dump_folder_path}...' )
Path(snake_case ).mkdir(exist_ok=snake_case )
model.save_pretrained(snake_case )
processor.save_pretrained(snake_case )
# Push to hub
if push_to_hub:
print("Pushing model and processor to hub..." )
model.push_to_hub(f'jozhang97/{model_name}' )
processor.push_to_hub(f'jozhang97/{model_name}' )
if __name__ == "__main__":
_lowerCAmelCase : int = argparse.ArgumentParser()
parser.add_argument(
"""--model_name""",
type=str,
default="""deta-swin-large""",
choices=["""deta-swin-large""", """deta-swin-large-o365"""],
help="""Name of the model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=str,
help="""Path to the folder to output PyTorch model.""",
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
_lowerCAmelCase : Tuple = parser.parse_args()
convert_deta_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
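# A minimal invocation sketch (not part of the original script; the file name
# below is assumed, since the script's real path is not shown in this dump):
#
#   python convert_deta_swin_to_pytorch.py \
#       --model_name deta-swin-large \
#       --pytorch_dump_folder_path ./deta-swin-large \
#       --push_to_hub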
"""simple docstring"""
import importlib
import os
import fsspec
import pytest
from fsspec import register_implementation
from fsspec.registry import _registry as _fsspec_registry
from datasets.filesystems import COMPRESSION_FILESYSTEMS, HfFileSystem, extract_path_from_uri, is_remote_filesystem
from .utils import require_lza, require_zstandard
def SCREAMING_SNAKE_CASE__ ( snake_case : Optional[Any] )-> Any:
'''simple docstring'''
assert "mock" in _fsspec_registry
assert "bz2" in _fsspec_registry
def SCREAMING_SNAKE_CASE__ ( )-> List[Any]:
'''simple docstring'''
assert "mock" not in _fsspec_registry
assert "bz2" in _fsspec_registry
def SCREAMING_SNAKE_CASE__ ( )-> Optional[int]:
'''simple docstring'''
UpperCAmelCase__ : int = "mock-s3-bucket"
UpperCAmelCase__ : Any = f's3://{mock_bucket}'
UpperCAmelCase__ : Tuple = extract_path_from_uri(snake_case )
assert dataset_path.startswith("s3://" ) is False
UpperCAmelCase__ : str = "./local/path"
UpperCAmelCase__ : Union[str, Any] = extract_path_from_uri(snake_case )
assert dataset_path == new_dataset_path
def SCREAMING_SNAKE_CASE__ ( snake_case : Any )-> str:
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = is_remote_filesystem(snake_case )
assert is_remote is True
UpperCAmelCase__ : str = fsspec.filesystem("file" )
UpperCAmelCase__ : Optional[Any] = is_remote_filesystem(snake_case )
assert is_remote is False
@pytest.mark.parametrize("compression_fs_class" , COMPRESSION_FILESYSTEMS )
def SCREAMING_SNAKE_CASE__ ( snake_case : Union[str, Any] , snake_case : Any , snake_case : List[str] , snake_case : Optional[int] , snake_case : Union[str, Any] , snake_case : Optional[Any] , snake_case : int )-> int:
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_file, "bz2": bza_file, "lz4": lza_file}
UpperCAmelCase__ : Dict = input_paths[compression_fs_class.protocol]
if input_path is None:
UpperCAmelCase__ : Optional[Any] = f'for \'{compression_fs_class.protocol}\' compression protocol, '
if compression_fs_class.protocol == "lz4":
reason += require_lza.kwargs["reason"]
elif compression_fs_class.protocol == "zstd":
reason += require_zstandard.kwargs["reason"]
pytest.skip(reason )
UpperCAmelCase__ : Optional[Any] = fsspec.filesystem(compression_fs_class.protocol , fo=input_path )
assert isinstance(snake_case , snake_case )
UpperCAmelCase__ : Union[str, Any] = os.path.basename(snake_case )
UpperCAmelCase__ : Optional[int] = expected_filename[: expected_filename.rindex("." )]
assert fs.glob("*" ) == [expected_filename]
with fs.open(snake_case , "r" , encoding="utf-8" ) as f, open(snake_case , encoding="utf-8" ) as expected_file:
assert f.read() == expected_file.read()
@pytest.mark.parametrize("protocol" , ["zip", "gzip"] )
def SCREAMING_SNAKE_CASE__ ( snake_case : Tuple , snake_case : Dict , snake_case : Tuple )-> Optional[Any]:
'''simple docstring'''
UpperCAmelCase__ : List[str] = {"zip": zip_jsonl_path, "gzip": jsonl_gz_path}
UpperCAmelCase__ : int = compressed_file_paths[protocol]
UpperCAmelCase__ : Any = "dataset.jsonl"
UpperCAmelCase__ : Any = f'{protocol}://{member_file_path}::{compressed_file_path}'
UpperCAmelCase__ , *UpperCAmelCase__ : Optional[int] = fsspec.get_fs_token_paths(snake_case )
assert fs.isfile(snake_case )
assert not fs.isfile("non_existing_" + member_file_path )
@pytest.mark.integration
def SCREAMING_SNAKE_CASE__ ( snake_case : List[str] , snake_case : Dict , snake_case : Dict , snake_case : Dict )-> str:
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = hf_api.dataset_info(snake_case , token=snake_case )
UpperCAmelCase__ : str = HfFileSystem(repo_info=snake_case , token=snake_case )
assert sorted(hffs.glob("*" ) ) == [".gitattributes", "data"]
assert hffs.isdir("data" )
assert hffs.isfile(".gitattributes" ) and hffs.isfile("data/text_data.txt" )
with open(snake_case ) as f:
assert hffs.open("data/text_data.txt" , "r" ).read() == f.read()
def SCREAMING_SNAKE_CASE__ ( )-> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase__ : Tuple = "bz2"
# Import module
import datasets.filesystems
# Overwrite protocol and reload
register_implementation(snake_case , snake_case , clobber=snake_case )
with pytest.warns(snake_case ) as warning_info:
importlib.reload(datasets.filesystems )
assert len(snake_case ) == 1
assert (
str(warning_info[0].message )
== f'A filesystem protocol was already set for {protocol} and will be overwritten.'
)
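# Note on the chained URLs exercised above (a sketch, not part of the original
# tests): fsspec composes protocols left to right, so a path such as
# "zip://dataset.jsonl::/path/to/archive.zip" opens the member `dataset.jsonl`
# inside the local zip archive.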
"""simple docstring"""
class lowerCAmelCase__ :
def __init__( self : Optional[int] , snake_case__ : str = "" , snake_case__ : bool = False ):
'''simple docstring'''
# Mapping from the first character of the prefix of the node
UpperCAmelCase__ : dict[str, RadixNode] = {}
# A node will be a leaf if the tree contains its word
UpperCAmelCase__ : Optional[Any] = is_leaf
UpperCAmelCase__ : Tuple = prefix
def __a ( self : int , snake_case__ : str ):
'''simple docstring'''
UpperCAmelCase__ : str = 0
for q, w in zip(self.prefix , word ):
if q != w:
break
x += 1
return self.prefix[:x], self.prefix[x:], word[x:]
def __a ( self : List[str] , snake_case__ : list[str] ):
'''simple docstring'''
for word in words:
self.insert(word )
def __a ( self : Any , snake_case__ : str ):
'''simple docstring'''
# Case 1: If the word is the prefix of the node
# Solution: We set the current node as leaf
if self.prefix == word:
UpperCAmelCase__ : int = True
# Case 2: The node has no edges that have a prefix to the word
# Solution: We create an edge from the current node to a new one
# containing the word
elif word[0] not in self.nodes:
UpperCAmelCase__ : Tuple = RadixNode(prefix=word , is_leaf=True )
else:
UpperCAmelCase__ : Dict = self.nodes[word[0]]
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = incoming_node.match(
word )
# Case 3: The node prefix is equal to the matching
# Solution: We insert the remaining word on the next node
if remaining_prefix == "":
self.nodes[matching_string[0]].insert(remaining_word )
# Case 4: The word is greater equal to the matching
# Solution: Create a node in between both nodes, change
# prefixes and add the new node for the remaining word
else:
UpperCAmelCase__ : Dict = remaining_prefix
UpperCAmelCase__ : Tuple = self.nodes[matching_string[0]]
UpperCAmelCase__ : Any = RadixNode(matching_string , False )
UpperCAmelCase__ : Union[str, Any] = aux_node
if remaining_word == "":
UpperCAmelCase__ : str = True
else:
self.nodes[matching_string[0]].insert(remaining_word )
def __a ( self : List[Any] , snake_case__ : str ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = self.nodes.get(word[0] , None )
if not incoming_node:
return False
else:
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Dict = incoming_node.match(
word )
# If there is remaining prefix, the word can't be on the tree
if remaining_prefix != "":
return False
# This applies when the word and the prefix are equal
elif remaining_word == "":
return incoming_node.is_leaf
# We have word remaining so we check the next node
else:
return incoming_node.find(remaining_word )
def __a ( self : List[Any] , snake_case__ : str ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = self.nodes.get(word[0] , None )
if not incoming_node:
return False
else:
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : List[Any] = incoming_node.match(
word )
# If there is remaining prefix, the word can't be on the tree
if remaining_prefix != "":
return False
# We have word remaining so we check the next node
elif remaining_word != "":
return incoming_node.delete(remaining_word )
else:
# If it is not a leaf, we don't have to delete
if not incoming_node.is_leaf:
return False
else:
# We delete the nodes if no edges go from it
if len(incoming_node.nodes ) == 0:
del self.nodes[word[0]]
# We merge the current node with its only child
if len(self.nodes ) == 1 and not self.is_leaf:
UpperCAmelCase__ : Optional[Any] = list(self.nodes.values() )[0]
UpperCAmelCase__ : Union[str, Any] = merging_node.is_leaf
self.prefix += merging_node.prefix
UpperCAmelCase__ : Tuple = merging_node.nodes
# If there is more than 1 edge, we just mark it as non-leaf
elif len(incoming_node.nodes ) > 1:
UpperCAmelCase__ : Optional[Any] = False
# If there is 1 edge, we merge it with its child
else:
UpperCAmelCase__ : str = list(incoming_node.nodes.values() )[0]
UpperCAmelCase__ : Dict = merging_node.is_leaf
incoming_node.prefix += merging_node.prefix
UpperCAmelCase__ : Union[str, Any] = merging_node.nodes
return True
def __a ( self : List[Any] , snake_case__ : int = 0 ):
'''simple docstring'''
if self.prefix != "":
print("-" * height , self.prefix , " (leaf)" if self.is_leaf else "" )
for value in self.nodes.values():
value.print_tree(height + 1 )
def SCREAMING_SNAKE_CASE__ ( )-> bool:
'''simple docstring'''
UpperCAmelCase__ : int = "banana bananas bandana band apple all beast".split()
UpperCAmelCase__ : Any = RadixNode()
root.insert_many(words )
assert all(root.find(word ) for word in words )
assert not root.find("bandanas" )
assert not root.find("apps" )
root.delete("all" )
assert not root.find("all" )
root.delete("banana" )
assert not root.find("banana" )
assert root.find("bananas" )
return True
def SCREAMING_SNAKE_CASE__ ( )-> None:
'''simple docstring'''
assert test_trie()
def SCREAMING_SNAKE_CASE__ ( )-> None:
'''simple docstring'''
UpperCAmelCase__ : Any = RadixNode()
UpperCAmelCase__ : int = "banana bananas bandanas bandana band apple all beast".split()
root.insert_many(words )
print("Words:" , words )
print("Tree:" )
root.print_tree()
if __name__ == "__main__":
main()
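# A worked example of the prefix-splitting helper above (a sketch, not part of
# the original module): for a node whose prefix is "band", matching the word
# "bandana" compares characters until they diverge, so
# RadixNode(prefix="band").match("bandana") returns ("band", "", "ana"):
# the common part, the leftover node prefix, and the leftover word.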
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase : List[Any] = logging.get_logger(__name__)
_lowerCAmelCase : List[Any] = {
# See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}
class lowerCAmelCase__ ( __magic_name__ ):
SCREAMING_SNAKE_CASE_ ='''megatron-bert'''
def __init__( self : Optional[Any] , snake_case__ : Dict=2_9_0_5_6 , snake_case__ : Optional[int]=1_0_2_4 , snake_case__ : int=2_4 , snake_case__ : str=1_6 , snake_case__ : Optional[Any]=4_0_9_6 , snake_case__ : List[str]="gelu" , snake_case__ : List[str]=0.1 , snake_case__ : Optional[int]=0.1 , snake_case__ : Tuple=5_1_2 , snake_case__ : str=2 , snake_case__ : List[Any]=0.02 , snake_case__ : Any=1e-12 , snake_case__ : Any=0 , snake_case__ : str="absolute" , snake_case__ : Optional[Any]=True , **snake_case__ : int , ):
'''simple docstring'''
super().__init__(pad_token_id=snake_case__ , **snake_case__ )
UpperCAmelCase__ : str = vocab_size
UpperCAmelCase__ : str = hidden_size
UpperCAmelCase__ : List[str] = num_hidden_layers
UpperCAmelCase__ : Optional[int] = num_attention_heads
UpperCAmelCase__ : int = hidden_act
UpperCAmelCase__ : Tuple = intermediate_size
UpperCAmelCase__ : Tuple = hidden_dropout_prob
UpperCAmelCase__ : List[Any] = attention_probs_dropout_prob
UpperCAmelCase__ : Any = max_position_embeddings
UpperCAmelCase__ : Dict = type_vocab_size
UpperCAmelCase__ : Optional[int] = initializer_range
UpperCAmelCase__ : int = layer_norm_eps
UpperCAmelCase__ : Optional[Any] = position_embedding_type
UpperCAmelCase__ : Any = use_cache
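# Usage sketch (not part of the original file): the defaults above mirror the
# published Megatron-BERT-345M sizes, so a bare instantiation gives a
# 24-layer model with a hidden size of 1024:
#
#   config = MegatronBertConfig()
#   assert (config.num_hidden_layers, config.hidden_size) == (24, 1024)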
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase : Optional[int] = logging.get_logger(__name__)
_lowerCAmelCase : Tuple = {
"""vinvino02/glpn-kitti""": """https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json""",
# See all GLPN models at https://huggingface.co/models?filter=glpn
}
class lowerCAmelCase__ ( __magic_name__ ):
SCREAMING_SNAKE_CASE_ ='''glpn'''
def __init__( self : Any , snake_case__ : List[Any]=3 , snake_case__ : Optional[int]=4 , snake_case__ : Optional[Any]=[2, 2, 2, 2] , snake_case__ : Any=[8, 4, 2, 1] , snake_case__ : str=[3_2, 6_4, 1_6_0, 2_5_6] , snake_case__ : Tuple=[7, 3, 3, 3] , snake_case__ : Union[str, Any]=[4, 2, 2, 2] , snake_case__ : List[Any]=[1, 2, 5, 8] , snake_case__ : Any=[4, 4, 4, 4] , snake_case__ : int="gelu" , snake_case__ : List[str]=0.0 , snake_case__ : Dict=0.0 , snake_case__ : str=0.02 , snake_case__ : Optional[Any]=0.1 , snake_case__ : Union[str, Any]=1e-6 , snake_case__ : Dict=6_4 , snake_case__ : Dict=1_0 , snake_case__ : Tuple=-1 , **snake_case__ : str , ):
'''simple docstring'''
super().__init__(**snake_case__ )
UpperCAmelCase__ : Any = num_channels
UpperCAmelCase__ : Tuple = num_encoder_blocks
UpperCAmelCase__ : Any = depths
UpperCAmelCase__ : Tuple = sr_ratios
UpperCAmelCase__ : List[Any] = hidden_sizes
UpperCAmelCase__ : Any = patch_sizes
UpperCAmelCase__ : Dict = strides
UpperCAmelCase__ : Optional[Any] = mlp_ratios
UpperCAmelCase__ : int = num_attention_heads
UpperCAmelCase__ : Optional[Any] = hidden_act
UpperCAmelCase__ : Union[str, Any] = hidden_dropout_prob
UpperCAmelCase__ : List[str] = attention_probs_dropout_prob
UpperCAmelCase__ : List[str] = initializer_range
UpperCAmelCase__ : Tuple = drop_path_rate
UpperCAmelCase__ : Optional[int] = layer_norm_eps
UpperCAmelCase__ : Dict = decoder_hidden_size
UpperCAmelCase__ : Tuple = max_depth
UpperCAmelCase__ : Any = head_in_index
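# Usage sketch (not part of the original file): individual fields can be
# overridden while the remaining defaults are kept:
#
#   config = GLPNConfig(decoder_hidden_size=128)
#   assert config.depths == [2, 2, 2, 2] and config.decoder_hidden_size == 128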
"""simple docstring"""
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import datasets
import datasets.config
from .utils import require_beam
class lowerCAmelCase__ ( datasets.BeamBasedBuilder ):
def __a ( self : Dict ):
'''simple docstring'''
return datasets.DatasetInfo(
features=datasets.Features({"content": datasets.Value("string" )} ) , supervised_keys=snake_case__ , )
def __a ( self : int , snake_case__ : str , snake_case__ : List[str] ):
'''simple docstring'''
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"examples": get_test_dummy_examples()} )]
def __a ( self : Any , snake_case__ : str , snake_case__ : str ):
'''simple docstring'''
import apache_beam as beam
return pipeline | "Load Examples" >> beam.Create(snake_case__ )
class lowerCAmelCase__ ( datasets.BeamBasedBuilder ):
def __a ( self : Any ):
'''simple docstring'''
return datasets.DatasetInfo(
features=datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string" )} )} ) , supervised_keys=snake_case__ , )
def __a ( self : Union[str, Any] , snake_case__ : int , snake_case__ : int ):
'''simple docstring'''
return [
datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"examples": get_test_nested_examples()} )
]
def __a ( self : Dict , snake_case__ : List[Any] , snake_case__ : Any ):
'''simple docstring'''
import apache_beam as beam
return pipeline | "Load Examples" >> beam.Create(snake_case__ )
def SCREAMING_SNAKE_CASE__ ( )-> Dict:
'''simple docstring'''
return [(i, {"content": content}) for i, content in enumerate(["foo", "bar", "foobar"] )]
def SCREAMING_SNAKE_CASE__ ( )-> List[Any]:
'''simple docstring'''
return [(i, {"a": {"b": [content]}}) for i, content in enumerate(["foo", "bar", "foobar"] )]
class lowerCAmelCase__ ( __magic_name__ ):
@require_beam
def __a ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : Any = len(get_test_dummy_examples() )
with tempfile.TemporaryDirectory() as tmp_cache_dir:
UpperCAmelCase__ : List[Any] = DummyBeamDataset(cache_dir=snake_case__ , beam_runner="DirectRunner" )
builder.download_and_prepare()
self.assertTrue(
os.path.exists(
os.path.join(snake_case__ , builder.name , "default" , "0.0.0" , f'{builder.name}-train.arrow' ) ) )
self.assertDictEqual(builder.info.features , datasets.Features({"content": datasets.Value("string" )} ) )
UpperCAmelCase__ : Tuple = builder.as_dataset()
self.assertEqual(dset["train"].num_rows , snake_case__ )
self.assertEqual(dset["train"].info.splits["train"].num_examples , snake_case__ )
self.assertDictEqual(dset["train"][0] , get_test_dummy_examples()[0][1] )
self.assertDictEqual(
dset["train"][expected_num_examples - 1] , get_test_dummy_examples()[expected_num_examples - 1][1] )
self.assertTrue(
os.path.exists(os.path.join(snake_case__ , builder.name , "default" , "0.0.0" , "dataset_info.json" ) ) )
del dset
@require_beam
def __a ( self : Dict ):
'''simple docstring'''
import apache_beam as beam
UpperCAmelCase__ : Dict = beam.io.parquetio.WriteToParquet
UpperCAmelCase__ : List[str] = len(get_test_dummy_examples() )
with tempfile.TemporaryDirectory() as tmp_cache_dir:
UpperCAmelCase__ : Union[str, Any] = DummyBeamDataset(cache_dir=snake_case__ , beam_runner="DirectRunner" )
with patch("apache_beam.io.parquetio.WriteToParquet" ) as write_parquet_mock:
UpperCAmelCase__ : List[Any] = partial(snake_case__ , num_shards=2 )
builder.download_and_prepare()
self.assertTrue(
os.path.exists(
os.path.join(
snake_case__ , builder.name , "default" , "0.0.0" , f'{builder.name}-train-00000-of-00002.arrow' ) ) )
self.assertTrue(
os.path.exists(
os.path.join(
snake_case__ , builder.name , "default" , "0.0.0" , f'{builder.name}-train-00001-of-00002.arrow' ) ) )
self.assertDictEqual(builder.info.features , datasets.Features({"content": datasets.Value("string" )} ) )
UpperCAmelCase__ : Dict = builder.as_dataset()
self.assertEqual(dset["train"].num_rows , snake_case__ )
self.assertEqual(dset["train"].info.splits["train"].num_examples , snake_case__ )
# Order is not preserved when sharding, so we just check that all the elements are there
self.assertListEqual(sorted(dset["train"]["content"] ) , sorted(["foo", "bar", "foobar"] ) )
self.assertTrue(
os.path.exists(os.path.join(snake_case__ , builder.name , "default" , "0.0.0" , "dataset_info.json" ) ) )
del dset
@require_beam
def __a ( self : str ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_cache_dir:
UpperCAmelCase__ : Optional[Any] = DummyBeamDataset(cache_dir=snake_case__ )
self.assertRaises(datasets.builder.MissingBeamOptions , builder.download_and_prepare )
@require_beam
def __a ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = len(get_test_nested_examples() )
with tempfile.TemporaryDirectory() as tmp_cache_dir:
UpperCAmelCase__ : List[Any] = NestedBeamDataset(cache_dir=snake_case__ , beam_runner="DirectRunner" )
builder.download_and_prepare()
self.assertTrue(
os.path.exists(
os.path.join(snake_case__ , builder.name , "default" , "0.0.0" , f'{builder.name}-train.arrow' ) ) )
self.assertDictEqual(
builder.info.features , datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string" )} )} ) )
UpperCAmelCase__ : Tuple = builder.as_dataset()
self.assertEqual(dset["train"].num_rows , snake_case__ )
self.assertEqual(dset["train"].info.splits["train"].num_examples , snake_case__ )
self.assertDictEqual(dset["train"][0] , get_test_nested_examples()[0][1] )
self.assertDictEqual(
dset["train"][expected_num_examples - 1] , get_test_nested_examples()[expected_num_examples - 1][1] )
self.assertTrue(
os.path.exists(os.path.join(snake_case__ , builder.name , "default" , "0.0.0" , "dataset_info.json" ) ) )
del dset
"""simple docstring"""
import argparse
import json
import os
import torch
from torch import nn
from transformers import NllbMoeConfig, NllbMoeModel
from transformers.modeling_utils import dtype_byte_size
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
def SCREAMING_SNAKE_CASE__ ( snake_case : List[str] )-> Optional[int]:
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = [
"encoder.version",
"decoder.version",
"model.encoder.version",
"model.decoder.version",
"decoder.output_projection.weight",
"_float_tensor",
"encoder.embed_positions._float_tensor",
"decoder.embed_positions._float_tensor",
]
for k in ignore_keys:
state_dict.pop(snake_case , snake_case )
def SCREAMING_SNAKE_CASE__ ( snake_case : int )-> int:
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = emb.weight.shape
UpperCAmelCase__ : Any = nn.Linear(snake_case , snake_case , bias=snake_case )
UpperCAmelCase__ : Tuple = emb.weight.data
return lin_layer
def SCREAMING_SNAKE_CASE__ ( snake_case : Tuple , snake_case : Dict=None )-> List[Any]:
'''simple docstring'''
UpperCAmelCase__ : Tuple = {}
for old_key in state_dict.keys():
UpperCAmelCase__ : Dict = old_key
if "moe_layer.experts." in key:
if expert_idx is not None:
UpperCAmelCase__ : Optional[Any] = key.replace("moe_layer.experts.0" , f'ffn.experts.expert_{expert_idx}' )
else:
UpperCAmelCase__ : Union[str, Any] = key.replace("moe_layer.experts." , "ffn.experts.expert_" )
if "gate" in key:
UpperCAmelCase__ : List[str] = key.replace(".moe_layer.gate.wg" , ".ffn.router.classifier" )
if "fc2" and "experts" not in key:
UpperCAmelCase__ : int = key.replace(".fc2." , ".ffn.fc2." )
if "fc1" and "experts" not in key:
UpperCAmelCase__ : Any = key.replace(".fc1." , ".ffn.fc1." )
if ".encoder_attn." in key:
UpperCAmelCase__ : Dict = key.replace(".encoder_attn." , ".cross_attention." )
if "encoder_attn_layer_norm" in key:
UpperCAmelCase__ : Tuple = key.replace("encoder_attn_layer_norm" , "cross_attention_layer_norm" )
if "final_layer_norm" in key:
UpperCAmelCase__ : Optional[int] = key.replace("final_layer_norm" , "ff_layer_norm" )
UpperCAmelCase__ : Optional[int] = state_dict[old_key]
return new_dict
def SCREAMING_SNAKE_CASE__ ( snake_case : str , snake_case : str , snake_case : Any , snake_case : int , snake_case : str = WEIGHTS_NAME )-> List[Any]:
'''simple docstring'''
UpperCAmelCase__ : Tuple = []
UpperCAmelCase__ : Dict = 0
os.makedirs(snake_case , exist_ok=snake_case )
for expert in range(snake_case ):
UpperCAmelCase__ : int = switch_checkpoint_path + f'-rank-{expert}.pt'
if os.path.isfile(snake_case ):
UpperCAmelCase__ : Optional[int] = torch.load(snake_case )["model"]
remove_ignore_keys_(snake_case )
UpperCAmelCase__ : Union[str, Any] = rename_fairseq_keys(snake_case , snake_case )
UpperCAmelCase__ : Dict = os.path.join(
snake_case , weights_name.replace(".bin" , f'-{len(snake_case )+1:05d}-of-???.bin' ) )
torch.save(snake_case , snake_case )
sharded_state_dicts.append(expert_state.keys() )
total_size += sum([value.numel() for key, value in expert_state.items()] ) * dtype_byte_size(
expert_state[list(snake_case )[0]].dtype )
# Add the last block
UpperCAmelCase__ : Optional[int] = os.path.join(snake_case , weights_name.replace(".bin" , f'-{len(snake_case )+1:05d}-of-???.bin' ) )
UpperCAmelCase__ : List[Any] = torch.load(switch_checkpoint_path + "-shared.pt" )["model"]
remove_ignore_keys_(snake_case )
UpperCAmelCase__ : Optional[int] = rename_fairseq_keys(snake_case , snake_case )
UpperCAmelCase__ : List[Any] = shared_weights["decoder.embed_tokens.weight"]
sharded_state_dicts.append(shared_weights.keys() )
# If we only have the shared weights (dummy model/experts saved on the same file)
if len(sharded_state_dicts ) == 1:
UpperCAmelCase__ : List[Any] = os.path.join(snake_case , snake_case )
torch.save(snake_case , snake_case )
return {weights_name: sharded_state_dicts[0]}, None
else:
torch.save(snake_case , snake_case )
# Otherwise, let's build the index
UpperCAmelCase__ : List[str] = {}
for idx, shard in enumerate(snake_case ):
UpperCAmelCase__ : Optional[int] = weights_name.replace(".bin" , f'-{idx+1:05d}-of-{len(snake_case ):05d}.bin' )
UpperCAmelCase__ : Optional[int] = os.path.join(snake_case , weights_name.replace(".bin" , f'-{idx+1:05d}-of-???.bin' ) )
os.rename(snake_case , os.path.join(snake_case , snake_case ) )
for key in shard:
UpperCAmelCase__ : Optional[Any] = shard_file
# Add the metadata
UpperCAmelCase__ : Optional[int] = {"total_size": total_size}
UpperCAmelCase__ : int = {"metadata": metadata, "weight_map": weight_map}
with open(os.path.join(snake_case , snake_case ) , "w" , encoding="utf-8" ) as f:
UpperCAmelCase__ : List[str] = json.dumps(snake_case , indent=2 , sort_keys=snake_case ) + "\n"
f.write(snake_case )
return metadata, index
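# The index returned above follows the standard sharded-checkpoint layout used
# by `transformers` (a minimal sketch of its shape; the key and shard count
# below are illustrative assumptions):
#
#   {
#       "metadata": {"total_size": 123456},
#       "weight_map": {"shared.weight": "pytorch_model-00001-of-00002.bin", ...},
#   }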
if __name__ == "__main__":
_lowerCAmelCase : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--nllb_moe_checkpoint_path""",
default="""/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000""",
type=str,
required=False,
help="""Path to a directory containing a folder per layer. Follows the original Google format.""",
)
parser.add_argument("""--dtype""", default="""float32""", type=str, required=False, help="""dtype of the saved model""")
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b""",
type=str,
required=False,
help="""Path to the output pytorch model.""",
)
_lowerCAmelCase : List[str] = parser.parse_args()
_lowerCAmelCase , _lowerCAmelCase : Any = shard_on_the_fly(
args.nllb_moe_checkpoint_path,
args.pytorch_dump_folder_path,
128,
args.dtype,
)
_lowerCAmelCase : int = NllbMoeConfig.from_pretrained(
"""facebook/nllb-200-3.3B""", encoder_sparse_step=4, decoder_sparse_step=4, num_experts=128
)
config.save_pretrained(args.pytorch_dump_folder_path)
_lowerCAmelCase : Optional[int] = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path)
print("""Done""")
model.save_pretrained(args.pytorch_dump_folder_path)
"""simple docstring"""
import json
import os
import unittest
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class lowerCAmelCase__ ( __magic_name__ , unittest.TestCase ):
SCREAMING_SNAKE_CASE_ =XLMTokenizer
SCREAMING_SNAKE_CASE_ =False
def __a ( self : Dict ):
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
UpperCAmelCase__ : Optional[int] = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"w</w>",
"r</w>",
"t</w>",
"lo",
"low",
"er</w>",
"low</w>",
"lowest</w>",
"newer</w>",
"wider</w>",
"<unk>",
]
UpperCAmelCase__ : Any = dict(zip(snake_case__ , range(len(snake_case__ ) ) ) )
UpperCAmelCase__ : Tuple = ["l o 123", "lo w 1456", "e r</w> 1789", ""]
UpperCAmelCase__ : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
UpperCAmelCase__ : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" ) as fp:
fp.write(json.dumps(snake_case__ ) )
with open(self.merges_file , "w" ) as fp:
fp.write("\n".join(snake_case__ ) )
def __a ( self : Union[str, Any] , snake_case__ : List[Any] ):
'''simple docstring'''
UpperCAmelCase__ : str = "lower newer"
UpperCAmelCase__ : Optional[Any] = "lower newer"
return input_text, output_text
def __a ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = XLMTokenizer(self.vocab_file , self.merges_file )
UpperCAmelCase__ : List[Any] = "lower"
UpperCAmelCase__ : Any = ["low", "er</w>"]
UpperCAmelCase__ : Any = tokenizer.tokenize(snake_case__ )
self.assertListEqual(snake_case__ , snake_case__ )
UpperCAmelCase__ : Optional[Any] = tokens + ["<unk>"]
UpperCAmelCase__ : List[Any] = [1_4, 1_5, 2_0]
self.assertListEqual(tokenizer.convert_tokens_to_ids(snake_case__ ) , snake_case__ )
@slow
def __a ( self : Any ):
'''simple docstring'''
UpperCAmelCase__ : Any = XLMTokenizer.from_pretrained("xlm-mlm-en-2048" )
UpperCAmelCase__ : str = tokenizer.encode("sequence builders" , add_special_tokens=snake_case__ )
UpperCAmelCase__ : Dict = tokenizer.encode("multi-sequence build" , add_special_tokens=snake_case__ )
UpperCAmelCase__ : Any = tokenizer.build_inputs_with_special_tokens(snake_case__ )
UpperCAmelCase__ : Optional[Any] = tokenizer.build_inputs_with_special_tokens(snake_case__ , snake_case__ )
assert encoded_sentence == [0] + text + [1]
assert encoded_pair == [0] + text + [1] + text_a + [1]
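# Sketch of what the toy BPE above produces (not part of the original test):
# "lower" is split into characters "l o w e r</w>", then the merges
# "l o" -> "lo", "lo w" -> "low" and "e r</w>" -> "er</w>" apply, giving the
# tokens ["low", "er</w>"] that the test checks.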
"""simple docstring"""
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import BaseOutput, is_torch_available, is_transformers_available
@dataclass
class lowerCAmelCase__ ( __magic_name__ ):
SCREAMING_SNAKE_CASE_ =42
SCREAMING_SNAKE_CASE_ =42
if is_transformers_available() and is_torch_available():
from .pipeline_semantic_stable_diffusion import SemanticStableDiffusionPipeline
"""simple docstring"""
import sys
from typing import Tuple
import numpy as np
import torch
from PIL import Image
from torch import nn
from transformers.image_utils import PILImageResampling
from utils import img_tensorize
class lowerCAmelCase__ :
def __init__( self : Optional[int] , snake_case__ : List[Any] , snake_case__ : str=sys.maxsize ):
'''simple docstring'''
UpperCAmelCase__ : Any = "bilinear"
UpperCAmelCase__ : Any = max_size
UpperCAmelCase__ : Any = short_edge_length
def __call__( self : Dict , snake_case__ : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = []
for img in imgs:
UpperCAmelCase__ , UpperCAmelCase__ : int = img.shape[:2]
# later: provide list and randomly choose index for resize
UpperCAmelCase__ : Dict = np.random.randint(self.short_edge_length[0] , self.short_edge_length[1] + 1 )
if size == 0:
return img
UpperCAmelCase__ : Dict = size * 1.0 / min(snake_case__ , snake_case__ )
if h < w:
UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = size, scale * w
else:
UpperCAmelCase__ , UpperCAmelCase__ : int = scale * h, size
if max(snake_case__ , snake_case__ ) > self.max_size:
UpperCAmelCase__ : Union[str, Any] = self.max_size * 1.0 / max(snake_case__ , snake_case__ )
UpperCAmelCase__ : List[str] = newh * scale
UpperCAmelCase__ : int = neww * scale
UpperCAmelCase__ : List[Any] = int(neww + 0.5 )
UpperCAmelCase__ : Optional[Any] = int(newh + 0.5 )
if img.dtype == np.uinta:
UpperCAmelCase__ : Any = Image.fromarray(snake_case__ )
UpperCAmelCase__ : Union[str, Any] = pil_image.resize((neww, newh) , PILImageResampling.BILINEAR )
UpperCAmelCase__ : Optional[int] = np.asarray(snake_case__ )
else:
UpperCAmelCase__ : Any = img.permute(2 , 0 , 1 ).unsqueeze(0 )  # hwc -> nchw
UpperCAmelCase__ : Tuple = nn.functional.interpolate(
snake_case__ , (newh, neww) , mode=self.interp_method , align_corners=snake_case__ ).squeeze(0 )
img_augs.append(snake_case__ )
return img_augs
class lowerCAmelCase__ :
def __init__( self : Optional[int] , snake_case__ : Dict ):
'''simple docstring'''
UpperCAmelCase__ : Dict = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST] , cfg.INPUT.MAX_SIZE_TEST )
UpperCAmelCase__ : Any = cfg.INPUT.FORMAT
UpperCAmelCase__ : Optional[Any] = cfg.SIZE_DIVISIBILITY
UpperCAmelCase__ : str = cfg.PAD_VALUE
UpperCAmelCase__ : List[Any] = cfg.INPUT.MAX_SIZE_TEST
UpperCAmelCase__ : Dict = cfg.MODEL.DEVICE
UpperCAmelCase__ : Optional[int] = torch.tensor(cfg.MODEL.PIXEL_STD ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 )
UpperCAmelCase__ : str = torch.tensor(cfg.MODEL.PIXEL_MEAN ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 )
UpperCAmelCase__ : List[str] = lambda snake_case__ : (x - self.pixel_mean) / self.pixel_std
def __a ( self : Optional[int] , snake_case__ : Dict ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = tuple(max(snake_case__ ) for s in zip(*[img.shape for img in images] ) )
UpperCAmelCase__ : Tuple = [im.shape[-2:] for im in images]
UpperCAmelCase__ : int = [
nn.functional.pad(
snake_case__ , [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]] , value=self.pad_value , )
for size, im in zip(snake_case__ , snake_case__ )
]
return torch.stack(snake_case__ ), torch.tensor(snake_case__ )
def __call__( self : str , snake_case__ : int , snake_case__ : int=False ):
'''simple docstring'''
with torch.no_grad():
if not isinstance(snake_case__ , snake_case__ ):
UpperCAmelCase__ : Dict = [images]
if single_image:
assert len(snake_case__ ) == 1
for i in range(len(snake_case__ ) ):
if isinstance(images[i] , torch.Tensor ):
images.insert(snake_case__ , images.pop(snake_case__ ).to(self.device ).float() )
elif not isinstance(images[i] , torch.Tensor ):
images.insert(
snake_case__ , torch.as_tensor(img_tensorize(images.pop(snake_case__ ) , input_format=self.input_format ) )
.to(self.device )
.float() , )
# resize smallest edge
UpperCAmelCase__ : Optional[Any] = torch.tensor([im.shape[:2] for im in images] )
UpperCAmelCase__ : Tuple = self.aug(snake_case__ )
# transpose images and convert to torch tensors
# images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images]
# now normalize before pad to avoid useless arithmetic
UpperCAmelCase__ : Optional[int] = [self.normalizer(snake_case__ ) for x in images]
# now pad them to do the following operations
UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = self.pad(snake_case__ )
# Normalize
if self.size_divisibility > 0:
raise NotImplementedError()
# pad
UpperCAmelCase__ : Tuple = torch.true_divide(snake_case__ , snake_case__ )
if single_image:
return images[0], sizes[0], scales_yx[0]
else:
return images, sizes, scales_yx
def SCREAMING_SNAKE_CASE__ ( snake_case : Optional[Any] , snake_case : str )-> List[Any]:
'''simple docstring'''
boxes[:, 0::2] *= scale_yx[:, 1]
boxes[:, 1::2] *= scale_yx[:, 0]
return boxes
def SCREAMING_SNAKE_CASE__ ( snake_case : List[Any] , snake_case : Tuple[int, int] )-> int:
'''simple docstring'''
assert torch.isfinite(snake_case ).all(), "Box tensor contains infinite or NaN!"
UpperCAmelCase__ , UpperCAmelCase__ : Dict = box_size
tensor[:, 0].clamp_(min=0 , max=w )
tensor[:, 1].clamp_(min=0 , max=h )
tensor[:, 2].clamp_(min=0 , max=w )
tensor[:, 3].clamp_(min=0 , max=h )
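# A worked example for the in-place box clipping above (a sketch, not part of
# the original module; the clipping function's real name is anonymized here):
#
#   boxes = torch.tensor([[-5.0, 10.0, 120.0, 90.0]])  # x1, y1, x2, y2
#   # clipping to a 100x100 image turns it into [[0.0, 10.0, 100.0, 90.0]]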
"""simple docstring"""
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from elia_utils import (
embed_questions_for_retrieval,
make_qa_sas_model,
qa_sas_generate,
query_es_index,
query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeqaSeqLM, AutoTokenizer
_lowerCAmelCase : Any = """bart"""
_lowerCAmelCase : int = True
@st.cache(allow_output_mutation=True )
def SCREAMING_SNAKE_CASE__ ( )-> List[str]:
'''simple docstring'''
if LOAD_DENSE_INDEX:
UpperCAmelCase__ : Union[str, Any] = AutoTokenizer.from_pretrained("yjernite/retribert-base-uncased" )
UpperCAmelCase__ : Tuple = AutoModel.from_pretrained("yjernite/retribert-base-uncased" ).to("cuda:0" )
UpperCAmelCase__ : Dict = qar_model.eval()
else:
UpperCAmelCase__ , UpperCAmelCase__ : Dict = (None, None)
if MODEL_TYPE == "bart":
UpperCAmelCase__ : Any = AutoTokenizer.from_pretrained("yjernite/bart_eli5" )
UpperCAmelCase__ : Union[str, Any] = AutoModelForSeqaSeqLM.from_pretrained("yjernite/bart_eli5" ).to("cuda:0" )
UpperCAmelCase__ : int = torch.load("seq2seq_models/eli5_bart_model_blm_2.pth" )
sas_model.load_state_dict(save_dict["model"] )
UpperCAmelCase__ : Tuple = sas_model.eval()
else:
UpperCAmelCase__ , UpperCAmelCase__ : Tuple = make_qa_sas_model(
model_name="t5-small" , from_file="seq2seq_models/eli5_t5_model_1024_4.pth" , device="cuda:0" )
return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
@st.cache(allow_output_mutation=True )
def SCREAMING_SNAKE_CASE__ ( )-> str:
'''simple docstring'''
if LOAD_DENSE_INDEX:
UpperCAmelCase__ : List[Any] = faiss.StandardGpuResources()
UpperCAmelCase__ : Any = datasets.load_dataset(path="wiki_snippets" , name="wiki40b_en_100_0" )["train"]
UpperCAmelCase__ : Optional[int] = np.memmap(
"wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat" , dtype="float32" , mode="r" , shape=(wikiaab_passages.num_rows, 128) , )
UpperCAmelCase__ : Union[str, Any] = faiss.IndexFlatIP(128 )
UpperCAmelCase__ : Any = faiss.index_cpu_to_gpu(snake_case , 1 , snake_case )
wikiaab_gpu_index_flat.add(snake_case ) # TODO fix for larger GPU
else:
UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = (None, None)
UpperCAmelCase__ : int = Elasticsearch([{"host": "localhost", "port": "9200"}] )
return (wikiaab_passages, wikiaab_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=True )
def SCREAMING_SNAKE_CASE__ ( )-> Tuple:
'''simple docstring'''
UpperCAmelCase__ : List[str] = datasets.load_dataset("eli5" , name="LFQA_reddit" )
UpperCAmelCase__ : Any = elia["train_eli5"]
UpperCAmelCase__ : List[Any] = np.memmap(
"eli5_questions_reps.dat" , dtype="float32" , mode="r" , shape=(elia_train.num_rows, 128) )
UpperCAmelCase__ : List[Any] = faiss.IndexFlatIP(128 )
eli5_train_q_index.add(snake_case )
return (elia_train, eli5_train_q_index)
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : List[Any] = load_indexes()
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Optional[Any] = load_models()
_lowerCAmelCase , _lowerCAmelCase : Optional[int] = load_train_data()
def SCREAMING_SNAKE_CASE__ ( snake_case : Optional[int] , snake_case : str=10 )-> List[str]:
'''simple docstring'''
UpperCAmelCase__ : str = embed_questions_for_retrieval([question] , snake_case , snake_case )
UpperCAmelCase__ , UpperCAmelCase__ : Any = eli5_train_q_index.search(snake_case , snake_case )
UpperCAmelCase__ : List[str] = [elia_train[int(i )] for i in I[0]]
return nn_examples
def SCREAMING_SNAKE_CASE__ ( snake_case : Dict , snake_case : Tuple="wiki40b" , snake_case : Dict="dense" , snake_case : Optional[int]=10 )-> Any:
'''simple docstring'''
if source == "none":
UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = (" <P> ".join(["" for _ in range(11 )] ).strip(), [])
else:
if method == "dense":
UpperCAmelCase__ , UpperCAmelCase__ : int = query_qa_dense_index(
snake_case , snake_case , snake_case , snake_case , snake_case , snake_case )
else:
UpperCAmelCase__ , UpperCAmelCase__ : Any = query_es_index(
snake_case , snake_case , index_name="english_wiki40b_snippets_100w" , n_results=snake_case , )
UpperCAmelCase__ : Optional[Any] = [
(res["article_title"], res["section_title"].strip(), res["score"], res["passage_text"]) for res in hit_lst
]
UpperCAmelCase__ : str = "question: {} context: {}".format(snake_case , snake_case )
return question_doc, support_list
@st.cache(
hash_funcs={
torch.Tensor: (lambda snake_case : None),
transformers.models.bart.tokenization_bart.BartTokenizer: (lambda snake_case : None),
} )
def SCREAMING_SNAKE_CASE__ ( snake_case : str , snake_case : Optional[int] , snake_case : Any , snake_case : Dict=64 , snake_case : Any=256 , snake_case : Optional[Any]=False , snake_case : Optional[int]=2 , snake_case : Optional[Any]=0.95 , snake_case : Dict=0.8 )-> Optional[Any]:
'''simple docstring'''
with torch.no_grad():
UpperCAmelCase__ : List[str] = qa_sas_generate(
snake_case , snake_case , snake_case , num_answers=1 , num_beams=snake_case , min_len=snake_case , max_len=snake_case , do_sample=snake_case , temp=snake_case , top_p=snake_case , top_k=snake_case , max_input_length=1024 , device="cuda:0" , )[0]
return (answer, support_list)
st.title("""Long Form Question Answering with ELI5""")
# Start sidebar
_lowerCAmelCase : Dict = """<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>"""
_lowerCAmelCase : Union[str, Any] = """
<html>
<head>
<style>
.img-container {
padding-left: 90px;
padding-right: 90px;
padding-top: 50px;
padding-bottom: 50px;
background-color: #f0f3f9;
}
</style>
</head>
<body>
<span class=\"img-container\"> <!-- Inline parent element -->
%s
</span>
</body>
</html>
""" % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
_lowerCAmelCase : Optional[Any] = """
This demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).
First, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,
a pre-processed fixed snapshot of Wikipedia.
"""
st.sidebar.markdown(description, unsafe_allow_html=True)
_lowerCAmelCase : int = [
"""Answer the question""",
"""View the retrieved document only""",
"""View the most similar ELI5 question and answer""",
"""Show me everything, please!""",
]
_lowerCAmelCase : Union[str, Any] = st.sidebar.checkbox("""Demo options""")
if demo_options:
_lowerCAmelCase : str = st.sidebar.selectbox(
"""""",
action_list,
index=3,
)
_lowerCAmelCase : List[str] = action_list.index(action_st)
_lowerCAmelCase : int = st.sidebar.selectbox(
"""""",
["""Show full text of passages""", """Show passage section titles"""],
index=0,
)
_lowerCAmelCase : int = show_type == """Show full text of passages"""
else:
_lowerCAmelCase : Optional[int] = 3
_lowerCAmelCase : Any = True
_lowerCAmelCase : str = st.sidebar.checkbox("""Retrieval options""")
if retrieval_options:
_lowerCAmelCase : Union[str, Any] = """
### Information retriever options
 The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding
 trained using the [ELI5](https://arxiv.org/abs/1907.09190) question-answer pairs.
 The answer is then generated by a sequence-to-sequence model which takes the question and retrieved document as input.
"""
st.sidebar.markdown(retriever_info)
_lowerCAmelCase : Any = st.sidebar.selectbox("""Which Wikipedia format should the model use?""", ["""wiki40b""", """none"""])
_lowerCAmelCase : Optional[Any] = st.sidebar.selectbox("""Which Wikipedia indexer should the model use?""", ["""dense""", """sparse""", """mixed"""])
else:
_lowerCAmelCase : Union[str, Any] = """wiki40b"""
_lowerCAmelCase : Any = """dense"""
_lowerCAmelCase : Tuple = """beam"""
_lowerCAmelCase : Dict = 2
_lowerCAmelCase : Any = 64
_lowerCAmelCase : Optional[int] = 256
_lowerCAmelCase : Optional[int] = None
_lowerCAmelCase : Tuple = None
_lowerCAmelCase : Dict = st.sidebar.checkbox("""Generation options""")
if generate_options:
_lowerCAmelCase : str = """
### Answer generation options
The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)
 weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can decode the answer with
 **beam** search, or **sample** from the decoder's output probabilities.
"""
st.sidebar.markdown(generate_info)
_lowerCAmelCase : Optional[Any] = st.sidebar.selectbox("""Would you like to use beam search or sample an answer?""", ["""beam""", """sampled"""])
_lowerCAmelCase : Tuple = st.sidebar.slider(
"""Minimum generation length""", min_value=8, max_value=256, value=64, step=8, format=None, key=None
)
_lowerCAmelCase : List[str] = st.sidebar.slider(
"""Maximum generation length""", min_value=64, max_value=512, value=256, step=16, format=None, key=None
)
if sampled == "beam":
_lowerCAmelCase : int = st.sidebar.slider("""Beam size""", min_value=1, max_value=8, value=2, step=None, format=None, key=None)
else:
_lowerCAmelCase : str = st.sidebar.slider(
"""Nucleus sampling p""", min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
)
_lowerCAmelCase : int = st.sidebar.slider(
"""Temperature""", min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
)
_lowerCAmelCase : Dict = None
# start main text
_lowerCAmelCase : Optional[Any] = [
"""<MY QUESTION>""",
"""How do people make chocolate?""",
"""Why do we get a fever when we are sick?""",
"""How can different animals perceive different colors?""",
"""What is natural language processing?""",
"""What's the best way to treat a sunburn?""",
"""What exactly are vitamins ?""",
"""How does nuclear energy provide electricity?""",
"""What's the difference between viruses and bacteria?""",
"""Why are flutes classified as woodwinds when most of them are made out of metal ?""",
"""Why do people like drinking coffee even though it tastes so bad?""",
"""What happens when wine ages? How does it make the wine taste better?""",
"""If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?""",
"""How can we set a date to the beginning or end of an artistic period? Doesn't the change happen gradually?""",
"""How does New Zealand have so many large bird predators?""",
]
_lowerCAmelCase : Union[str, Any] = st.selectbox(
"""What would you like to ask? ---- select <MY QUESTION> to enter a new query""",
questions_list,
index=1,
)
if question_s == "<MY QUESTION>":
_lowerCAmelCase : Optional[int] = st.text_input("""Enter your question here:""", """""")
else:
_lowerCAmelCase : Optional[Any] = question_s
if st.button("""Show me!"""):
if action in [0, 1, 3]:
if index_type == "mixed":
_lowerCAmelCase , _lowerCAmelCase : Optional[int] = make_support(question, source=wiki_source, method="""dense""", n_results=10)
_lowerCAmelCase , _lowerCAmelCase : int = make_support(question, source=wiki_source, method="""sparse""", n_results=10)
_lowerCAmelCase : Any = []
for res_d, res_s in zip(support_list_dense, support_list_sparse):
if tuple(res_d) not in support_list:
support_list += [tuple(res_d)]
if tuple(res_s) not in support_list:
support_list += [tuple(res_s)]
_lowerCAmelCase : int = support_list[:10]
_lowerCAmelCase : Any = """<P> """ + """ <P> """.join([res[-1] for res in support_list])
else:
_lowerCAmelCase , _lowerCAmelCase : Any = make_support(question, source=wiki_source, method=index_type, n_results=10)
if action in [0, 3]:
_lowerCAmelCase , _lowerCAmelCase : Union[str, Any] = answer_question(
question_doc,
sas_model,
sas_tokenizer,
min_len=min_len,
max_len=int(max_len),
sampling=(sampled == """sampled"""),
n_beams=n_beams,
top_p=top_p,
temp=temp,
)
st.markdown("""### The model generated answer is:""")
st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
st.markdown("""--- \n ### The model is drawing information from the following Wikipedia passages:""")
for i, res in enumerate(support_list):
_lowerCAmelCase : List[Any] = """https://en.wikipedia.org/wiki/{}""".format(res[0].replace(""" """, """_"""))
_lowerCAmelCase : int = res[1].strip()
if sec_titles == "":
_lowerCAmelCase : int = """[{}]({})""".format(res[0], wiki_url)
else:
_lowerCAmelCase : Any = sec_titles.split(""" & """)
_lowerCAmelCase : Tuple = """ & """.join(
["""[{}]({}#{})""".format(sec.strip(), wiki_url, sec.strip().replace(""" """, """_""")) for sec in sec_list]
)
st.markdown(
"""{0:02d} - **Article**: {1:<18} <br> _Section_: {2}""".format(i + 1, res[0], sections),
unsafe_allow_html=True,
)
if show_passages:
st.write(
"""> <span style=\"font-family:arial; font-size:10pt;\">""" + res[-1] + """</span>""", unsafe_allow_html=True
)
if action in [2, 3]:
_lowerCAmelCase : int = find_nearest_training(question)
_lowerCAmelCase : Optional[Any] = nn_train_list[0]
st.markdown(
"""--- \n ### The most similar question in the ELI5 training set was: \n\n {}""".format(train_exple["""title"""])
)
_lowerCAmelCase : str = [
"""{}. {}""".format(i + 1, """ \n""".join([line.strip() for line in ans.split("""\n""") if line.strip() != """"""]))
for i, (ans, sc) in enumerate(zip(train_exple["""answers"""]["""text"""], train_exple["""answers"""]["""score"""]))
if i == 0 or sc > 2
]
st.markdown("""##### Its answers were: \n\n {}""".format("""\n""".join(answers_st)))
_lowerCAmelCase : Union[str, Any] = """
---
**Disclaimer**
*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.
Evaluating biases of such a model and ensuring factual generations are still very much open research problems.
Therefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*
"""
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
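The dense retriever described in the sidebar scores passages by max-inner-product search between question and passage embeddings. Below is a minimal sketch of that retrieval step, assuming a FAISS flat inner-product index and random placeholder vectors in place of the precomputed Wiki40b snippet embeddings; it is an illustration, not the demo's actual index-loading code.

import faiss
import numpy as np

# Placeholder corpus: 1000 passages embedded in 128 dimensions (assumed size).
dim = 128
passage_embeddings = np.random.rand(1000, dim).astype("float32")
index = faiss.IndexFlatIP(dim)  # exact max-inner-product (MIPS) index
index.add(passage_embeddings)

# Embed one question the same way, then fetch the ten best-scoring passages.
question_embedding = np.random.rand(1, dim).astype("float32")
scores, ids = index.search(question_embedding, 10)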
| 298
|
"""simple docstring"""
import qiskit
def half_adder( bita : int , bitb : int )-> qiskit.result.counts.Counts:
    '''simple docstring'''
    simulator = qiskit.Aer.get_backend("aer_simulator" )
    qc_ha = qiskit.QuantumCircuit(4 , 2 )
    # encode inputs in qubits 0 and 1
    if bita == 1:
        qc_ha.x(0 )
    if bitb == 1:
        qc_ha.x(1 )
    qc_ha.barrier()
    # use cnots to write XOR of the inputs on qubit2
    qc_ha.cx(0 , 2 )
    qc_ha.cx(1 , 2 )
    # use ccx / toffoli gate to write AND of the inputs on qubit3
    qc_ha.ccx(0 , 1 , 3 )
    qc_ha.barrier()
    # extract outputs
    qc_ha.measure(2 , 0 )  # extract XOR value
    qc_ha.measure(3 , 1 )  # extract AND value
    # Execute the circuit on the qasm simulator
    job = qiskit.execute(qc_ha , simulator , shots=1000 )
    # Return the histogram data of the results of the experiment
    return job.result().get_counts(qc_ha )
if __name__ == "__main__":
    counts = half_adder(1, 1)
    print(F"""Half Adder Output Qubit Counts: {counts}""")
| 298
| 1
|
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import BlenderbotSmallConfig, BlenderbotSmallTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel
@require_tf
class lowerCAmelCase__ :
SCREAMING_SNAKE_CASE_ =BlenderbotSmallConfig
SCREAMING_SNAKE_CASE_ ={}
SCREAMING_SNAKE_CASE_ ='''gelu'''
def __init__( self : str , snake_case__ : str , snake_case__ : Optional[int]=1_3 , snake_case__ : Optional[Any]=7 , snake_case__ : Dict=True , snake_case__ : int=False , snake_case__ : List[Any]=9_9 , snake_case__ : Dict=3_2 , snake_case__ : Union[str, Any]=2 , snake_case__ : List[str]=4 , snake_case__ : Union[str, Any]=3_7 , snake_case__ : int=0.1 , snake_case__ : List[str]=0.1 , snake_case__ : str=2_0 , snake_case__ : str=2 , snake_case__ : Any=1 , snake_case__ : Any=0 , ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = parent
UpperCAmelCase__ : Dict = batch_size
UpperCAmelCase__ : Dict = seq_length
UpperCAmelCase__ : Optional[Any] = is_training
UpperCAmelCase__ : Union[str, Any] = use_labels
UpperCAmelCase__ : int = vocab_size
UpperCAmelCase__ : Any = hidden_size
UpperCAmelCase__ : int = num_hidden_layers
UpperCAmelCase__ : Dict = num_attention_heads
UpperCAmelCase__ : Optional[Any] = intermediate_size
UpperCAmelCase__ : Optional[Any] = hidden_dropout_prob
UpperCAmelCase__ : Optional[int] = attention_probs_dropout_prob
UpperCAmelCase__ : int = max_position_embeddings
UpperCAmelCase__ : Union[str, Any] = eos_token_id
UpperCAmelCase__ : List[str] = pad_token_id
UpperCAmelCase__ : Dict = bos_token_id
def __a ( self : Dict ):
'''simple docstring'''
UpperCAmelCase__ : Any = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
UpperCAmelCase__ : Optional[int] = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
UpperCAmelCase__ : Tuple = tf.concat([input_ids, eos_tensor] , axis=1 )
UpperCAmelCase__ : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase__ : Tuple = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
UpperCAmelCase__ : Dict = prepare_blenderbot_small_inputs_dict(snake_case__ , snake_case__ , snake_case__ )
return config, inputs_dict
def __a ( self : Any , snake_case__ : List[Any] , snake_case__ : int ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = TFBlenderbotSmallModel(config=snake_case__ ).get_decoder()
UpperCAmelCase__ : Tuple = inputs_dict["input_ids"]
UpperCAmelCase__ : str = input_ids[:1, :]
UpperCAmelCase__ : Dict = inputs_dict["attention_mask"][:1, :]
UpperCAmelCase__ : str = inputs_dict["head_mask"]
UpperCAmelCase__ : str = 1
# first forward pass
UpperCAmelCase__ : Any = model(snake_case__ , attention_mask=snake_case__ , head_mask=snake_case__ , use_cache=snake_case__ )
UpperCAmelCase__ , UpperCAmelCase__ : int = outputs.to_tuple()
        # create hypothetical next token and extend to next_input_ids
UpperCAmelCase__ : int = ids_tensor((self.batch_size, 3) , config.vocab_size )
UpperCAmelCase__ : str = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
        # append to next input_ids and attention_mask
UpperCAmelCase__ : Optional[Any] = tf.concat([input_ids, next_tokens] , axis=-1 )
UpperCAmelCase__ : List[Any] = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
UpperCAmelCase__ : Optional[Any] = model(snake_case__ , attention_mask=snake_case__ )[0]
UpperCAmelCase__ : int = model(snake_case__ , attention_mask=snake_case__ , past_key_values=snake_case__ )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
UpperCAmelCase__ : str = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
UpperCAmelCase__ : Dict = output_from_no_past[:, -3:, random_slice_idx]
UpperCAmelCase__ : List[str] = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(snake_case__ , snake_case__ , rtol=1e-3 )
def SCREAMING_SNAKE_CASE__ ( snake_case : List[Any] , snake_case : Any , snake_case : Dict , snake_case : Dict=None , snake_case : Tuple=None , snake_case : Any=None , snake_case : Optional[Any]=None , snake_case : Union[str, Any]=None , )-> Dict:
'''simple docstring'''
if attention_mask is None:
UpperCAmelCase__ : List[Any] = tf.cast(tf.math.not_equal(snake_case , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
UpperCAmelCase__ : Optional[Any] = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
UpperCAmelCase__ : str = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
UpperCAmelCase__ : Optional[Any] = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
UpperCAmelCase__ : Optional[Any] = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class lowerCAmelCase__ ( __magic_name__ , __magic_name__ , unittest.TestCase ):
SCREAMING_SNAKE_CASE_ =(
(TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel) if is_tf_available() else ()
)
SCREAMING_SNAKE_CASE_ =(TFBlenderbotSmallForConditionalGeneration,) if is_tf_available() else ()
SCREAMING_SNAKE_CASE_ =(
{
'''conversational''': TFBlenderbotSmallForConditionalGeneration,
'''feature-extraction''': TFBlenderbotSmallModel,
'''summarization''': TFBlenderbotSmallForConditionalGeneration,
'''text2text-generation''': TFBlenderbotSmallForConditionalGeneration,
'''translation''': TFBlenderbotSmallForConditionalGeneration,
}
if is_tf_available()
else {}
)
SCREAMING_SNAKE_CASE_ =True
SCREAMING_SNAKE_CASE_ =False
SCREAMING_SNAKE_CASE_ =False
def __a ( self : int ):
'''simple docstring'''
UpperCAmelCase__ : str = TFBlenderbotSmallModelTester(self )
UpperCAmelCase__ : int = ConfigTester(self , config_class=snake_case__ )
def __a ( self : Optional[Any] ):
'''simple docstring'''
self.config_tester.run_common_tests()
def __a ( self : str ):
'''simple docstring'''
UpperCAmelCase__ : int = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*snake_case__ )
@require_tokenizers
@require_tf
class lowerCAmelCase__ ( unittest.TestCase ):
SCREAMING_SNAKE_CASE_ =[
'''Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. I end up sweating and blushing and feel like '''
''' i\'m going to throw up.\nand why is that?'''
]
SCREAMING_SNAKE_CASE_ ='''facebook/blenderbot_small-90M'''
@cached_property
def __a ( self : List[str] ):
'''simple docstring'''
# use "old" tokenizer here because of bug when downloading new tokenizer
return BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M" )
@cached_property
def __a ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
@slow
def __a ( self : int ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = self.tokenizer(self.src_text , return_tensors="tf" )
UpperCAmelCase__ : Union[str, Any] = self.model.generate(
model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=snake_case__ , )
UpperCAmelCase__ : List[str] = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=snake_case__ )[0]
assert generated_words in (
"i don't know. i just feel like i'm going to throw up. it's not fun.",
"i'm not sure. i just feel like i've been feeling like i have to be in a certain place",
"i'm not sure. i just feel like i've been in a bad situation.",
)
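The decoder-cache test above reduces to one comparison: logits computed over the full sequence must match logits computed incrementally with past key/values, on the newly generated positions. A framework-agnostic sketch of that check, using numpy stand-ins for model outputs rather than the actual TF model:

import numpy as np

def caches_agree(full_logits: np.ndarray, cached_logits: np.ndarray, rtol: float = 1e-3) -> bool:
    # Compare only the positions produced after the cache was filled.
    new_positions = cached_logits.shape[1]
    return np.allclose(full_logits[:, -new_positions:], cached_logits, rtol=rtol)

# Toy check: identical logits trivially agree on the last three positions.
logits = np.random.rand(2, 5, 7)
assert caches_agree(logits, logits[:, -3:])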
| 298
|
"""simple docstring"""
from typing import List
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase : Dict = logging.get_logger(__name__)
_lowerCAmelCase : Union[str, Any] = {
"""snap-research/efficientformer-l1-300""": (
"""https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json"""
),
}
class lowerCAmelCase__ ( __magic_name__ ):
SCREAMING_SNAKE_CASE_ ='''efficientformer'''
def __init__( self : List[Any] , snake_case__ : List[int] = [3, 2, 6, 4] , snake_case__ : List[int] = [4_8, 9_6, 2_2_4, 4_4_8] , snake_case__ : List[bool] = [True, True, True, True] , snake_case__ : int = 4_4_8 , snake_case__ : int = 3_2 , snake_case__ : int = 4 , snake_case__ : int = 7 , snake_case__ : int = 5 , snake_case__ : int = 8 , snake_case__ : int = 4 , snake_case__ : float = 0.0 , snake_case__ : int = 1_6 , snake_case__ : int = 3 , snake_case__ : int = 3 , snake_case__ : int = 3 , snake_case__ : int = 2 , snake_case__ : int = 1 , snake_case__ : float = 0.0 , snake_case__ : int = 1 , snake_case__ : bool = True , snake_case__ : bool = True , snake_case__ : float = 1e-5 , snake_case__ : str = "gelu" , snake_case__ : float = 0.02 , snake_case__ : float = 1e-12 , snake_case__ : int = 2_2_4 , snake_case__ : float = 1e-05 , **snake_case__ : str , ):
'''simple docstring'''
super().__init__(**snake_case__ )
UpperCAmelCase__ : int = hidden_act
UpperCAmelCase__ : Optional[int] = hidden_dropout_prob
UpperCAmelCase__ : List[str] = hidden_sizes
UpperCAmelCase__ : Union[str, Any] = num_hidden_layers
UpperCAmelCase__ : int = num_attention_heads
UpperCAmelCase__ : List[Any] = initializer_range
UpperCAmelCase__ : List[Any] = layer_norm_eps
UpperCAmelCase__ : Optional[int] = patch_size
UpperCAmelCase__ : Tuple = num_channels
UpperCAmelCase__ : Optional[int] = depths
UpperCAmelCase__ : Union[str, Any] = mlp_expansion_ratio
UpperCAmelCase__ : Dict = downsamples
UpperCAmelCase__ : Any = dim
UpperCAmelCase__ : str = key_dim
UpperCAmelCase__ : List[Any] = attention_ratio
UpperCAmelCase__ : Optional[Any] = resolution
UpperCAmelCase__ : Optional[Any] = pool_size
UpperCAmelCase__ : Any = downsample_patch_size
UpperCAmelCase__ : int = downsample_stride
UpperCAmelCase__ : Dict = downsample_pad
UpperCAmelCase__ : List[Any] = drop_path_rate
UpperCAmelCase__ : Optional[Any] = num_metaad_blocks
UpperCAmelCase__ : List[str] = distillation
UpperCAmelCase__ : Dict = use_layer_scale
UpperCAmelCase__ : List[Any] = layer_scale_init_value
UpperCAmelCase__ : Optional[Any] = image_size
UpperCAmelCase__ : Optional[int] = batch_norm_eps
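A hedged usage sketch for the configuration above, assuming the public class name is EfficientFormerConfig (the class name is obfuscated in this listing) and that the keyword names match the __init__ signature shown:

from transformers import EfficientFormerConfig  # assumed public name

# Construct with defaults, overriding two architecture fields from the signature above.
config = EfficientFormerConfig(depths=[3, 2, 6, 4], hidden_sizes=[48, 96, 224, 448])
print(config.hidden_act)  # "gelu" by default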
| 298
| 1
|
"""simple docstring"""
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def SCREAMING_SNAKE_CASE__ ( snake_case : List[str] , snake_case : Union[str, Any] , snake_case : List[str]=None )-> Any:
'''simple docstring'''
assert torch_layer.weight.shape == weight.shape, f'{torch_layer} layer.weight does not match'
UpperCAmelCase__ : Dict = nn.Parameter(snake_case )
if bias is not None:
assert torch_layer.bias.shape == bias.shape, f'{torch_layer} layer.bias does not match'
UpperCAmelCase__ : Union[str, Any] = nn.Parameter(snake_case )
def SCREAMING_SNAKE_CASE__ ( snake_case : List[Any] , snake_case : Tuple , snake_case : int )-> Optional[int]:
'''simple docstring'''
UpperCAmelCase__ : str = np.asarray(weights[0] )
UpperCAmelCase__ : int = np.asarray(weights[1] )
UpperCAmelCase__ : List[str] = np.asarray(weights[2] )
set_param(
torch_layer.self_attention.query_key , torch.tensor(snake_case ).transpose(1 , 2 ).contiguous().view(-1 , snake_case ) , )
set_param(
torch_layer.self_attention.value , torch.tensor(snake_case ).transpose(1 , 2 ).contiguous().view(-1 , snake_case ) , )
set_param(
torch_layer.output.dense , torch.tensor(snake_case ).view(-1 , snake_case ).contiguous().transpose(0 , 1 ) , )
def SCREAMING_SNAKE_CASE__ ( snake_case : int , snake_case : int , snake_case : List[Any] )-> Any:
'''simple docstring'''
UpperCAmelCase__ : Any = np.asarray(weights[0] )
UpperCAmelCase__ : Any = np.asarray(weights[1] )
UpperCAmelCase__ : Union[str, Any] = np.asarray(weights[2] )
UpperCAmelCase__ : Union[str, Any] = np.asarray(weights[3] )
set_param(
torch_layer.self_attention.query , torch.tensor(snake_case ).transpose(1 , 2 ).contiguous().view(-1 , snake_case ) , )
set_param(
torch_layer.self_attention.key , torch.tensor(snake_case ).transpose(1 , 2 ).contiguous().view(-1 , snake_case ) , )
set_param(
torch_layer.self_attention.value , torch.tensor(snake_case ).transpose(1 , 2 ).contiguous().view(-1 , snake_case ) , )
set_param(
torch_layer.output.dense , torch.tensor(snake_case ).view(-1 , snake_case ).contiguous().transpose(0 , 1 ) , )
def SCREAMING_SNAKE_CASE__ ( snake_case : Optional[Any] , snake_case : Any , snake_case : int )-> Any:
'''simple docstring'''
UpperCAmelCase__ : List[Any] = weights[0][0][0]
UpperCAmelCase__ : Optional[Any] = np.asarray(layer_norm_a[0] )
UpperCAmelCase__ : Optional[int] = np.asarray(layer_norm_a[1] )
set_param(
torch_block.attention.layer_norm , torch.tensor(snake_case ) , torch.tensor(snake_case ) , )
# lsh weights + output
UpperCAmelCase__ : List[Any] = weights[0][1]
if len(snake_case ) < 4:
set_layer_weights_in_torch_lsh(snake_case , torch_block.attention , snake_case )
else:
set_layer_weights_in_torch_local(snake_case , torch_block.attention , snake_case )
    # intermediate weights
UpperCAmelCase__ : Any = weights[2][0][1][2]
# Chunked Feed Forward
if len(snake_case ) == 4:
UpperCAmelCase__ : int = intermediate_weights[2]
# layernorm 2
UpperCAmelCase__ : Optional[int] = np.asarray(intermediate_weights[0][0] )
UpperCAmelCase__ : Optional[int] = np.asarray(intermediate_weights[0][1] )
set_param(
torch_block.feed_forward.layer_norm , torch.tensor(snake_case ) , torch.tensor(snake_case ) , )
# intermediate dense
UpperCAmelCase__ : List[Any] = np.asarray(intermediate_weights[1][0] )
UpperCAmelCase__ : Union[str, Any] = np.asarray(intermediate_weights[1][1] )
set_param(
torch_block.feed_forward.dense.dense , torch.tensor(snake_case ).transpose(0 , 1 ).contiguous() , torch.tensor(snake_case ) , )
# intermediate out
UpperCAmelCase__ : Any = np.asarray(intermediate_weights[4][0] )
UpperCAmelCase__ : Optional[int] = np.asarray(intermediate_weights[4][1] )
set_param(
torch_block.feed_forward.output.dense , torch.tensor(snake_case ).transpose(0 , 1 ).contiguous() , torch.tensor(snake_case ) , )
def SCREAMING_SNAKE_CASE__ ( snake_case : str , snake_case : str , snake_case : str )-> List[str]:
'''simple docstring'''
UpperCAmelCase__ : str = torch_model.reformer
# word embeds
UpperCAmelCase__ : int = np.asarray(weights[1] )
set_param(
torch_model_reformer.embeddings.word_embeddings , torch.tensor(snake_case ) , )
if isinstance(weights[3] , snake_case ):
UpperCAmelCase__ : Optional[int] = torch_model_reformer.embeddings.position_embeddings
for emb_idx in range(len(position_embeddings.weights ) ):
UpperCAmelCase__ : Tuple = np.asarray(weights[3][emb_idx][0] )
assert (
position_embeddings.weights[emb_idx].shape == emb_weights.shape
), f'{position_embeddings[emb_idx]} emb does not match'
UpperCAmelCase__ : List[str] = nn.Parameter(torch.tensor(snake_case ) )
UpperCAmelCase__ : List[str] = weights[5]
assert len(torch_model_reformer.encoder.layers ) * 4 == len(
snake_case ), "HF and trax model do not have the same number of layers"
for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers ):
UpperCAmelCase__ : Optional[int] = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
set_block_weights_in_torch(snake_case , snake_case , snake_case )
# output layer norm
UpperCAmelCase__ : List[str] = np.asarray(weights[7][0] )
UpperCAmelCase__ : Optional[Any] = np.asarray(weights[7][1] )
set_param(
torch_model_reformer.encoder.layer_norm , torch.tensor(snake_case ) , torch.tensor(snake_case ) , )
# output embeddings
UpperCAmelCase__ : Optional[Any] = np.asarray(weights[9][0] )
UpperCAmelCase__ : int = np.asarray(weights[9][1] )
set_param(
torch_model.lm_head.decoder , torch.tensor(snake_case ).transpose(0 , 1 ).contiguous() , torch.tensor(snake_case ) , )
def SCREAMING_SNAKE_CASE__ ( snake_case : Optional[Any] , snake_case : List[Any] , snake_case : List[str] )-> str:
'''simple docstring'''
UpperCAmelCase__ : int = ReformerConfig.from_json_file(snake_case )
print(f'Building PyTorch model from configuration: {config}' )
UpperCAmelCase__ : Tuple = ReformerModelWithLMHead(snake_case )
with open(snake_case , "rb" ) as f:
UpperCAmelCase__ : Dict = pickle.load(snake_case )["weights"]
set_model_weights_in_torch(snake_case , snake_case , config.hidden_size )
# Save pytorch-model
print(f'Save PyTorch model to {pytorch_dump_path}' )
torch.save(model.state_dict() , snake_case )
if __name__ == "__main__":
_lowerCAmelCase : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--trax_model_pkl_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained Reformer model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
_lowerCAmelCase : List[str] = parser.parse_args()
convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
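The recurring pattern in this conversion is reshaping per-head trax attention weights into the 2-D matrices nn.Linear expects. A toy illustration of the transpose(1, 2).contiguous().view(-1, hidden_size) step, assuming trax stores the weights as (heads, hidden_size, head_dim):

import torch

num_heads, hidden_size, head_dim = 2, 8, 4
trax_weight = torch.randn(num_heads, hidden_size, head_dim)  # assumed trax layout
# (heads, hidden, head_dim) -> (heads, head_dim, hidden) -> (heads*head_dim, hidden)
torch_weight = trax_weight.transpose(1, 2).contiguous().view(-1, hidden_size)
assert torch_weight.shape == (num_heads * head_dim, hidden_size)  # Linear's (out, in)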
| 298
|
"""simple docstring"""
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def SCREAMING_SNAKE_CASE__ ( snake_case : Dataset , snake_case : Dict[str, str] )-> Any:
'''simple docstring'''
UpperCAmelCase__ : str = args.log_outputs
UpperCAmelCase__ : str = "_".join(args.dataset.split("/" ) + [args.config, args.split] )
# load metric
UpperCAmelCase__ : List[str] = load_metric("wer" )
UpperCAmelCase__ : Tuple = load_metric("cer" )
# compute metrics
UpperCAmelCase__ : List[str] = wer.compute(references=result["target"] , predictions=result["prediction"] )
UpperCAmelCase__ : Tuple = cer.compute(references=result["target"] , predictions=result["prediction"] )
# print & log results
UpperCAmelCase__ : Union[str, Any] = f'WER: {wer_result}\nCER: {cer_result}'
print(snake_case )
with open(f'{dataset_id}_eval_results.txt' , "w" ) as f:
f.write(snake_case )
# log all results in text file. Possibly interesting for analysis
if log_outputs is not None:
UpperCAmelCase__ : str = f'log_{dataset_id}_predictions.txt'
UpperCAmelCase__ : List[str] = f'log_{dataset_id}_targets.txt'
with open(snake_case , "w" ) as p, open(snake_case , "w" ) as t:
# mapping function to write output
def write_to_file(snake_case : List[Any] , snake_case : List[str] ):
p.write(f'{i}' + "\n" )
p.write(batch["prediction"] + "\n" )
t.write(f'{i}' + "\n" )
t.write(batch["target"] + "\n" )
result.map(snake_case , with_indices=snake_case )
def SCREAMING_SNAKE_CASE__ ( snake_case : str )-> str:
'''simple docstring'''
UpperCAmelCase__ : str = "[,?.!\-\;\:\"“%‘”�—’…–]" # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
UpperCAmelCase__ : str = re.sub(snake_case , "" , text.lower() )
# In addition, we can normalize the target text, e.g. removing new lines characters etc...
# note that order is important here!
UpperCAmelCase__ : Tuple = ["\n\n", "\n", " ", " "]
for t in token_sequences_to_ignore:
UpperCAmelCase__ : List[Any] = " ".join(text.split(snake_case ) )
return text
def SCREAMING_SNAKE_CASE__ ( snake_case : List[str] )-> str:
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = load_dataset(args.dataset , args.config , split=args.split , use_auth_token=snake_case )
# for testing: only process the first two examples as a test
# dataset = dataset.select(range(10))
# load processor
UpperCAmelCase__ : List[Any] = AutoFeatureExtractor.from_pretrained(args.model_id )
UpperCAmelCase__ : str = feature_extractor.sampling_rate
# resample audio
UpperCAmelCase__ : Dict = dataset.cast_column("audio" , Audio(sampling_rate=snake_case ) )
# load eval pipeline
if args.device is None:
UpperCAmelCase__ : List[str] = 0 if torch.cuda.is_available() else -1
UpperCAmelCase__ : Optional[int] = pipeline("automatic-speech-recognition" , model=args.model_id , device=args.device )
# map function to decode audio
def map_to_pred(snake_case : Any ):
UpperCAmelCase__ : List[str] = asr(
batch["audio"]["array"] , chunk_length_s=args.chunk_length_s , stride_length_s=args.stride_length_s )
UpperCAmelCase__ : List[Any] = prediction["text"]
UpperCAmelCase__ : Optional[int] = normalize_text(batch["sentence"] )
return batch
# run inference on all examples
UpperCAmelCase__ : Dict = dataset.map(snake_case , remove_columns=dataset.column_names )
# compute and log_results
# do not change function below
log_results(snake_case , snake_case )
if __name__ == "__main__":
_lowerCAmelCase : Any = argparse.ArgumentParser()
parser.add_argument(
"""--model_id""", type=str, required=True, help="""Model identifier. Should be loadable with 🤗 Transformers"""
)
parser.add_argument(
"""--dataset""",
type=str,
required=True,
help="""Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets""",
)
parser.add_argument(
"""--config""", type=str, required=True, help="""Config of the dataset. *E.g.* `'en'` for Common Voice"""
)
parser.add_argument("""--split""", type=str, required=True, help="""Split of the dataset. *E.g.* `'test'`""")
parser.add_argument(
"""--chunk_length_s""", type=float, default=None, help="""Chunk length in seconds. Defaults to 5 seconds."""
)
parser.add_argument(
"""--stride_length_s""", type=float, default=None, help="""Stride of the audio chunks. Defaults to 1 second."""
)
parser.add_argument(
"""--log_outputs""", action="""store_true""", help="""If defined, write outputs to log file for analysis."""
)
parser.add_argument(
"""--device""",
type=int,
default=None,
help="""The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.""",
)
_lowerCAmelCase : Tuple = parser.parse_args()
main(args)
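To see what normalize_text above actually does, here is its character-stripping step applied to a short transcript, using the same regex as the script:

import re

chars_to_ignore_regex = "[,?.!\-\;\:\"“%‘”�—’…–]"  # copied from normalize_text above
print(re.sub(chars_to_ignore_regex, "", "Hello, World! How are you?".lower()))
# -> "hello world how are you"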
| 298
| 1
|
"""simple docstring"""
import pytest
from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs
@pytest.mark.parametrize(
"kwargs, expected" , [
({"num_shards": 0, "max_num_jobs": 1}, []),
({"num_shards": 10, "max_num_jobs": 1}, [range(10 )]),
({"num_shards": 10, "max_num_jobs": 10}, [range(snake_case , i + 1 ) for i in range(10 )]),
({"num_shards": 1, "max_num_jobs": 10}, [range(1 )]),
({"num_shards": 10, "max_num_jobs": 3}, [range(0 , 4 ), range(4 , 7 ), range(7 , 10 )]),
({"num_shards": 3, "max_num_jobs": 10}, [range(0 , 1 ), range(1 , 2 ), range(2 , 3 )]),
] , )
def SCREAMING_SNAKE_CASE__ ( snake_case : Optional[int] , snake_case : Union[str, Any] )-> Any:
'''simple docstring'''
UpperCAmelCase__ : List[str] = _distribute_shards(**snake_case )
assert out == expected
@pytest.mark.parametrize(
"gen_kwargs, max_num_jobs, expected" , [
({"foo": 0}, 10, [{"foo": 0}]),
({"shards": [0, 1, 2, 3]}, 1, [{"shards": [0, 1, 2, 3]}]),
({"shards": [0, 1, 2, 3]}, 4, [{"shards": [0]}, {"shards": [1]}, {"shards": [2]}, {"shards": [3]}]),
({"shards": [0, 1]}, 4, [{"shards": [0]}, {"shards": [1]}]),
({"shards": [0, 1, 2, 3]}, 2, [{"shards": [0, 1]}, {"shards": [2, 3]}]),
] , )
def SCREAMING_SNAKE_CASE__ ( snake_case : Tuple , snake_case : Dict , snake_case : int )-> Optional[int]:
'''simple docstring'''
UpperCAmelCase__ : List[Any] = _split_gen_kwargs(snake_case , snake_case )
assert out == expected
@pytest.mark.parametrize(
"gen_kwargs, expected" , [
({"foo": 0}, 1),
({"shards": [0]}, 1),
({"shards": [0, 1, 2, 3]}, 4),
({"shards": [0, 1, 2, 3], "foo": 0}, 4),
({"shards": [0, 1, 2, 3], "other": (0, 1)}, 4),
({"shards": [0, 1, 2, 3], "shards2": [0, 1]}, RuntimeError),
] , )
def SCREAMING_SNAKE_CASE__ ( snake_case : List[Any] , snake_case : List[Any] )-> Tuple:
'''simple docstring'''
if expected is RuntimeError:
with pytest.raises(snake_case ):
_number_of_shards_in_gen_kwargs(snake_case )
else:
UpperCAmelCase__ : Any = _number_of_shards_in_gen_kwargs(snake_case )
assert out == expected
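For intuition, here is a hand-rolled equivalent of the shard distribution the first test exercises: split num_shards as evenly as possible across at most max_num_jobs jobs. This is a sketch matching the expected outputs above, not the library's actual implementation.

def distribute_shards(num_shards: int, max_num_jobs: int) -> list[range]:
    num_jobs = min(num_shards, max_num_jobs)
    out, start = [], 0
    for job in range(num_jobs):
        # Earlier jobs absorb the remainder, one extra shard each.
        size = num_shards // num_jobs + (1 if job < num_shards % num_jobs else 0)
        out.append(range(start, start + size))
        start += size
    return out

assert distribute_shards(10, 3) == [range(0, 4), range(4, 7), range(7, 10)]
assert distribute_shards(3, 10) == [range(0, 1), range(1, 2), range(2, 3)]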
| 298
|
"""simple docstring"""
import inspect
import unittest
from datasets import load_dataset
from packaging import version
from transformers import BeitConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_MAPPING,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
)
from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
import PIL
from PIL import Image
from transformers import BeitImageProcessor
class lowerCAmelCase__ :
def __init__( self : Any , snake_case__ : Union[str, Any] , snake_case__ : str=1_0_0 , snake_case__ : str=1_3 , snake_case__ : Optional[int]=3_0 , snake_case__ : List[Any]=2 , snake_case__ : Any=3 , snake_case__ : Union[str, Any]=True , snake_case__ : List[Any]=True , snake_case__ : Any=3_2 , snake_case__ : List[str]=4 , snake_case__ : Any=4 , snake_case__ : Dict=3_7 , snake_case__ : str="gelu" , snake_case__ : Union[str, Any]=0.1 , snake_case__ : int=0.1 , snake_case__ : List[Any]=1_0 , snake_case__ : Any=0.02 , snake_case__ : List[str]=3 , snake_case__ : Tuple=None , snake_case__ : Tuple=[0, 1, 2, 3] , ):
'''simple docstring'''
UpperCAmelCase__ : int = parent
UpperCAmelCase__ : List[str] = 1_0_0
UpperCAmelCase__ : List[Any] = batch_size
UpperCAmelCase__ : int = image_size
UpperCAmelCase__ : List[Any] = patch_size
UpperCAmelCase__ : List[Any] = num_channels
UpperCAmelCase__ : Any = is_training
UpperCAmelCase__ : str = use_labels
UpperCAmelCase__ : Any = hidden_size
UpperCAmelCase__ : Dict = num_hidden_layers
UpperCAmelCase__ : int = num_attention_heads
UpperCAmelCase__ : Tuple = intermediate_size
UpperCAmelCase__ : Any = hidden_act
UpperCAmelCase__ : Optional[int] = hidden_dropout_prob
UpperCAmelCase__ : str = attention_probs_dropout_prob
UpperCAmelCase__ : Optional[int] = type_sequence_label_size
UpperCAmelCase__ : Any = initializer_range
UpperCAmelCase__ : Any = scope
UpperCAmelCase__ : Optional[Any] = out_indices
UpperCAmelCase__ : int = num_labels
# in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
UpperCAmelCase__ : List[Any] = (image_size // patch_size) ** 2
UpperCAmelCase__ : Optional[int] = num_patches + 1
def __a ( self : Dict ):
'''simple docstring'''
UpperCAmelCase__ : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase__ : str = None
UpperCAmelCase__ : Optional[int] = None
if self.use_labels:
UpperCAmelCase__ : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase__ : Any = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
UpperCAmelCase__ : Tuple = self.get_config()
return config, pixel_values, labels, pixel_labels
def __a ( self : int ):
'''simple docstring'''
return BeitConfig(
vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=snake_case__ , initializer_range=self.initializer_range , out_indices=self.out_indices , )
def __a ( self : int , snake_case__ : str , snake_case__ : str , snake_case__ : Dict , snake_case__ : List[str] ):
'''simple docstring'''
UpperCAmelCase__ : Dict = BeitModel(config=snake_case__ )
model.to(snake_case__ )
model.eval()
UpperCAmelCase__ : Dict = model(snake_case__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __a ( self : Any , snake_case__ : List[str] , snake_case__ : Union[str, Any] , snake_case__ : Dict , snake_case__ : Any ):
'''simple docstring'''
UpperCAmelCase__ : int = BeitForMaskedImageModeling(config=snake_case__ )
model.to(snake_case__ )
model.eval()
UpperCAmelCase__ : List[Any] = model(snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) )
def __a ( self : Optional[Any] , snake_case__ : Tuple , snake_case__ : Tuple , snake_case__ : str , snake_case__ : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = self.type_sequence_label_size
UpperCAmelCase__ : Union[str, Any] = BeitForImageClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
UpperCAmelCase__ : Union[str, Any] = model(snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
UpperCAmelCase__ : Any = 1
UpperCAmelCase__ : List[Any] = BeitForImageClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
UpperCAmelCase__ : Optional[int] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCAmelCase__ : Optional[Any] = model(snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __a ( self : Union[str, Any] , snake_case__ : int , snake_case__ : str , snake_case__ : Any , snake_case__ : Dict ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = self.num_labels
UpperCAmelCase__ : int = BeitForSemanticSegmentation(snake_case__ )
model.to(snake_case__ )
model.eval()
UpperCAmelCase__ : int = model(snake_case__ )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
UpperCAmelCase__ : Dict = model(snake_case__ , labels=snake_case__ )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
def __a ( self : int ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = self.prepare_config_and_inputs()
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : List[str] = config_and_inputs
UpperCAmelCase__ : Any = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class lowerCAmelCase__ ( __magic_name__ , __magic_name__ , unittest.TestCase ):
SCREAMING_SNAKE_CASE_ =(
(BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation)
if is_torch_available()
else ()
)
SCREAMING_SNAKE_CASE_ =(
{
'''feature-extraction''': BeitModel,
'''image-classification''': BeitForImageClassification,
'''image-segmentation''': BeitForSemanticSegmentation,
}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE_ =False
SCREAMING_SNAKE_CASE_ =False
SCREAMING_SNAKE_CASE_ =False
def __a ( self : Dict ):
'''simple docstring'''
UpperCAmelCase__ : Dict = BeitModelTester(self )
UpperCAmelCase__ : List[str] = ConfigTester(self , config_class=snake_case__ , has_text_modality=snake_case__ , hidden_size=3_7 )
def __a ( self : List[str] ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="BEiT does not use inputs_embeds" )
def __a ( self : List[Any] ):
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(reason="BEiT has some layers using `add_module` which doesn't work well with `nn.DataParallel`" )
def __a ( self : List[str] ):
'''simple docstring'''
pass
def __a ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ : Dict = model_class(snake_case__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
UpperCAmelCase__ : Tuple = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(snake_case__ , nn.Linear ) )
def __a ( self : str ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ : int = model_class(snake_case__ )
UpperCAmelCase__ : int = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase__ : str = [*signature.parameters.keys()]
UpperCAmelCase__ : int = ["pixel_values"]
self.assertListEqual(arg_names[:1] , snake_case__ )
def __a ( self : str ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case__ )
def __a ( self : Dict ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*snake_case__ )
def __a ( self : str ):
'''simple docstring'''
UpperCAmelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*snake_case__ )
def __a ( self : Dict ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*snake_case__ )
def __a ( self : List[Any] ):
'''simple docstring'''
if not self.model_tester.is_training:
return
UpperCAmelCase__ , UpperCAmelCase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase__ : Optional[int] = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if model_class in [*get_values(snake_case__ ), BeitForMaskedImageModeling]:
continue
UpperCAmelCase__ : Optional[Any] = model_class(snake_case__ )
model.to(snake_case__ )
model.train()
UpperCAmelCase__ : Optional[int] = self._prepare_for_class(snake_case__ , snake_case__ , return_labels=snake_case__ )
UpperCAmelCase__ : Tuple = model(**snake_case__ ).loss
loss.backward()
def __a ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
UpperCAmelCase__ : Optional[int] = False
UpperCAmelCase__ : List[str] = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if (
model_class in [*get_values(snake_case__ ), BeitForMaskedImageModeling]
or not model_class.supports_gradient_checkpointing
):
continue
UpperCAmelCase__ : List[Any] = model_class(snake_case__ )
model.gradient_checkpointing_enable()
model.to(snake_case__ )
model.train()
UpperCAmelCase__ : Dict = self._prepare_for_class(snake_case__ , snake_case__ , return_labels=snake_case__ )
UpperCAmelCase__ : Optional[Any] = model(**snake_case__ ).loss
loss.backward()
def __a ( self : str ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase__ : Union[str, Any] = _config_zero_init(snake_case__ )
for model_class in self.all_model_classes:
UpperCAmelCase__ : int = model_class(config=snake_case__ )
for name, param in model.named_parameters():
# we skip lambda parameters as these require special initial values
# determined by config.layer_scale_init_value
if "lambda" in name:
continue
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f'Parameter {name} of model {model_class} seems not properly initialized' , )
@slow
def __a ( self : Any ):
'''simple docstring'''
for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase__ : Optional[Any] = BeitModel.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
def SCREAMING_SNAKE_CASE__ ( )-> Optional[Any]:
'''simple docstring'''
UpperCAmelCase__ : List[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class lowerCAmelCase__ ( unittest.TestCase ):
@cached_property
def __a ( self : Union[str, Any] ):
'''simple docstring'''
return BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224" ) if is_vision_available() else None
@slow
def __a ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = BeitForMaskedImageModeling.from_pretrained("microsoft/beit-base-patch16-224-pt22k" ).to(snake_case__ )
UpperCAmelCase__ : int = self.default_image_processor
UpperCAmelCase__ : List[Any] = prepare_img()
UpperCAmelCase__ : Dict = image_processor(images=snake_case__ , return_tensors="pt" ).pixel_values.to(snake_case__ )
# prepare bool_masked_pos
UpperCAmelCase__ : Union[str, Any] = torch.ones((1, 1_9_6) , dtype=torch.bool ).to(snake_case__ )
# forward pass
with torch.no_grad():
UpperCAmelCase__ : Optional[int] = model(pixel_values=snake_case__ , bool_masked_pos=snake_case__ )
UpperCAmelCase__ : str = outputs.logits
# verify the logits
UpperCAmelCase__ : int = torch.Size((1, 1_9_6, 8_1_9_2) )
self.assertEqual(logits.shape , snake_case__ )
UpperCAmelCase__ : Any = torch.tensor(
[[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]] ).to(snake_case__ )
self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3] , snake_case__ , atol=1e-2 ) )
@slow
def __a ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = BeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224" ).to(snake_case__ )
UpperCAmelCase__ : Tuple = self.default_image_processor
UpperCAmelCase__ : Dict = prepare_img()
UpperCAmelCase__ : Tuple = image_processor(images=snake_case__ , return_tensors="pt" ).to(snake_case__ )
# forward pass
with torch.no_grad():
UpperCAmelCase__ : Union[str, Any] = model(**snake_case__ )
UpperCAmelCase__ : Any = outputs.logits
# verify the logits
UpperCAmelCase__ : Optional[Any] = torch.Size((1, 1_0_0_0) )
self.assertEqual(logits.shape , snake_case__ )
UpperCAmelCase__ : Optional[Any] = torch.tensor([-1.2385, -1.0987, -1.0108] ).to(snake_case__ )
self.assertTrue(torch.allclose(logits[0, :3] , snake_case__ , atol=1e-4 ) )
UpperCAmelCase__ : List[str] = 2_8_1
self.assertEqual(logits.argmax(-1 ).item() , snake_case__ )
@slow
def __a ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : int = BeitForImageClassification.from_pretrained("microsoft/beit-large-patch16-224-pt22k-ft22k" ).to(
snake_case__ )
UpperCAmelCase__ : Tuple = self.default_image_processor
UpperCAmelCase__ : Any = prepare_img()
UpperCAmelCase__ : Union[str, Any] = image_processor(images=snake_case__ , return_tensors="pt" ).to(snake_case__ )
# forward pass
with torch.no_grad():
UpperCAmelCase__ : List[Any] = model(**snake_case__ )
UpperCAmelCase__ : int = outputs.logits
# verify the logits
UpperCAmelCase__ : int = torch.Size((1, 2_1_8_4_1) )
self.assertEqual(logits.shape , snake_case__ )
UpperCAmelCase__ : int = torch.tensor([1.6881, -0.2787, 0.5901] ).to(snake_case__ )
self.assertTrue(torch.allclose(logits[0, :3] , snake_case__ , atol=1e-4 ) )
UpperCAmelCase__ : Any = 2_3_9_6
self.assertEqual(logits.argmax(-1 ).item() , snake_case__ )
@slow
def __a ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ : Dict = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640" )
UpperCAmelCase__ : List[Any] = model.to(snake_case__ )
UpperCAmelCase__ : int = BeitImageProcessor(do_resize=snake_case__ , size=6_4_0 , do_center_crop=snake_case__ )
UpperCAmelCase__ : Any = load_dataset("hf-internal-testing/fixtures_ade20k" , split="test" )
UpperCAmelCase__ : List[Any] = Image.open(ds[0]["file"] )
UpperCAmelCase__ : str = image_processor(images=snake_case__ , return_tensors="pt" ).to(snake_case__ )
# forward pass
with torch.no_grad():
UpperCAmelCase__ : List[str] = model(**snake_case__ )
UpperCAmelCase__ : Dict = outputs.logits
# verify the logits
UpperCAmelCase__ : Any = torch.Size((1, 1_5_0, 1_6_0, 1_6_0) )
self.assertEqual(logits.shape , snake_case__ )
UpperCAmelCase__ : List[str] = version.parse(PIL.__version__ ) < version.parse("9.0.0" )
if is_pillow_less_than_a:
UpperCAmelCase__ : Optional[Any] = torch.tensor(
[
[[-4.9225, -2.3954, -3.0522], [-2.8822, -1.0046, -1.7561], [-2.9549, -1.3228, -2.1347]],
[[-5.8168, -3.4129, -4.0778], [-3.8651, -2.2214, -3.0277], [-3.8356, -2.4643, -3.3535]],
[[-0.0078, 3.9952, 4.0754], [2.9856, 4.6944, 5.0035], [3.2413, 4.7813, 4.9969]],
] , device=snake_case__ , )
else:
UpperCAmelCase__ : int = torch.tensor(
[
[[-4.8960, -2.3688, -3.0355], [-2.8478, -0.9836, -1.7418], [-2.9449, -1.3332, -2.1456]],
[[-5.8081, -3.4124, -4.1006], [-3.8561, -2.2081, -3.0323], [-3.8365, -2.4601, -3.3669]],
[[-0.0309, 3.9868, 4.0540], [2.9640, 4.6877, 4.9976], [3.2081, 4.7690, 4.9942]],
] , device=snake_case__ , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , snake_case__ , atol=1e-4 ) )
@slow
def __a ( self : Any ):
'''simple docstring'''
UpperCAmelCase__ : str = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640" )
UpperCAmelCase__ : Any = model.to(snake_case__ )
UpperCAmelCase__ : Dict = BeitImageProcessor(do_resize=snake_case__ , size=6_4_0 , do_center_crop=snake_case__ )
UpperCAmelCase__ : Tuple = load_dataset("hf-internal-testing/fixtures_ade20k" , split="test" )
UpperCAmelCase__ : Optional[int] = Image.open(ds[0]["file"] )
UpperCAmelCase__ : Optional[int] = image_processor(images=snake_case__ , return_tensors="pt" ).to(snake_case__ )
# forward pass
with torch.no_grad():
UpperCAmelCase__ : Optional[int] = model(**snake_case__ )
UpperCAmelCase__ : int = outputs.logits.detach().cpu()
UpperCAmelCase__ : str = image_processor.post_process_semantic_segmentation(outputs=snake_case__ , target_sizes=[(5_0_0, 3_0_0)] )
UpperCAmelCase__ : List[Any] = torch.Size((5_0_0, 3_0_0) )
self.assertEqual(segmentation[0].shape , snake_case__ )
UpperCAmelCase__ : Any = image_processor.post_process_semantic_segmentation(outputs=snake_case__ )
UpperCAmelCase__ : int = torch.Size((1_6_0, 1_6_0) )
self.assertEqual(segmentation[0].shape , snake_case__ )
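The seq_length used throughout the tester comes from simple patch arithmetic: the image is cut into non-overlapping patches and one [CLS] token is prepended.

image_size, patch_size = 30, 2                 # the tester's defaults above
num_patches = (image_size // patch_size) ** 2  # 15 * 15 = 225
seq_length = num_patches + 1                   # +1 for the [CLS] token
assert seq_length == 226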
| 298
| 1
|
"""simple docstring"""
from __future__ import annotations
from collections.abc import Callable
from typing import Any, Generic, TypeVar
_lowerCAmelCase : Optional[int] = TypeVar("""T""")
class lowerCAmelCase__ ( Generic[T] ):
def __init__( self : List[str] , snake_case__ : list[T] , snake_case__ : Callable[[T, T], T] ):
'''simple docstring'''
UpperCAmelCase__ : Any | T = None
UpperCAmelCase__ : int = len(snake_case__ )
UpperCAmelCase__ : list[T] = [any_type for _ in range(self.N )] + arr
UpperCAmelCase__ : List[str] = fnc
self.build()
def __a ( self : Optional[int] ):
'''simple docstring'''
for p in range(self.N - 1 , 0 , -1 ):
UpperCAmelCase__ : int = self.fn(self.st[p * 2] , self.st[p * 2 + 1] )
def __a ( self : List[Any] , snake_case__ : int , snake_case__ : T ):
'''simple docstring'''
p += self.N
UpperCAmelCase__ : int = v
while p > 1:
UpperCAmelCase__ : str = p // 2
UpperCAmelCase__ : Union[str, Any] = self.fn(self.st[p * 2] , self.st[p * 2 + 1] )
def __a ( self : int , snake_case__ : int , snake_case__ : int ): # noqa: E741
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ : List[str] = l + self.N, r + self.N
UpperCAmelCase__ : T | None = None
while l <= r:
if l % 2 == 1:
                UpperCAmelCase__ : Optional[Any] = self.st[l] if res is None else self.fn(res , self.st[l] )
            if r % 2 == 0:
                UpperCAmelCase__ : str = self.st[r] if res is None else self.fn(res , self.st[r] )
UpperCAmelCase__ , UpperCAmelCase__ : str = (l + 1) // 2, (r - 1) // 2
return res
if __name__ == "__main__":
from functools import reduce
_lowerCAmelCase : Union[str, Any] = [1, 10, -2, 9, -3, 8, 4, -7, 5, 6, 11, -12]
_lowerCAmelCase : Optional[Any] = {
0: 7,
1: 2,
2: 6,
3: -14,
4: 5,
5: 4,
6: 7,
7: -10,
8: 9,
9: 10,
10: 12,
11: 1,
}
_lowerCAmelCase : List[Any] = SegmentTree(test_array, min)
_lowerCAmelCase : Tuple = SegmentTree(test_array, max)
_lowerCAmelCase : int = SegmentTree(test_array, lambda a, b: a + b)
def SCREAMING_SNAKE_CASE__ ( )-> None:
'''simple docstring'''
for i in range(len(snake_case ) ):
for j in range(snake_case , len(snake_case ) ):
UpperCAmelCase__ : Optional[Any] = reduce(snake_case , test_array[i : j + 1] )
UpperCAmelCase__ : List[Any] = reduce(snake_case , test_array[i : j + 1] )
            UpperCAmelCase__ : Optional[Any] = reduce(lambda a , b : a + b , test_array[i : j + 1] )
assert min_range == min_segment_tree.query(snake_case , snake_case )
assert max_range == max_segment_tree.query(snake_case , snake_case )
assert sum_range == sum_segment_tree.query(snake_case , snake_case )
test_all_segments()
for index, value in test_updates.items():
_lowerCAmelCase : Optional[int] = value
min_segment_tree.update(index, value)
max_segment_tree.update(index, value)
sum_segment_tree.update(index, value)
test_all_segments()
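The tree above lives in a flat array of length 2*N: leaves occupy indices N..2N-1 and node p's parent is p // 2, which is why build walks from N - 1 down to 1 and update climbs with p // 2. A tiny demonstration of that layout:

N = 4
leaves = list(range(N, 2 * N))       # leaf slots: [4, 5, 6, 7]
parents = [p // 2 for p in leaves]   # their parents: [2, 2, 3, 3]
print(leaves, parents)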
| 298
|
"""simple docstring"""
import functools
def SCREAMING_SNAKE_CASE__ ( worda : str , wordb : str )-> int:
    '''simple docstring'''
    len_worda = len(worda )
    len_wordb = len(wordb )
    @functools.cache
    def min_distance(indexa : int , indexb : int ) -> int:
        # if the first word's index overflows - delete the rest of the second word
        if indexa >= len_worda:
            return len_wordb - indexb
        # if the second word's index overflows - delete the rest of the first word
        if indexb >= len_wordb:
            return len_worda - indexa
        diff = int(worda[indexa] != wordb[indexb] )  # current letters not identical
        return min(
            1 + min_distance(indexa + 1 , indexb ) , 1 + min_distance(indexa , indexb + 1 ) , diff + min_distance(indexa + 1 , indexb + 1 ) , )
    return min_distance(0 , 0 )
if __name__ == "__main__":
import doctest
doctest.testmod()
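A classic worked example for the function above: turning "kitten" into "sitting" takes three edits (substitute k->s, substitute e->i, append g). An independent bottom-up DP cross-check of that value:

def edit_distance(a: str, b: str) -> int:
    # Standard Levenshtein table: dp[i][j] = distance between a[:i] and b[:j].
    m, n = len(a), len(b)
    dp = [[0] * (n + 1) for _ in range(m + 1)]
    for i in range(m + 1):
        for j in range(n + 1):
            if i == 0:
                dp[i][j] = j
            elif j == 0:
                dp[i][j] = i
            else:
                cost = int(a[i - 1] != b[j - 1])
                dp[i][j] = min(dp[i - 1][j] + 1, dp[i][j - 1] + 1, dp[i - 1][j - 1] + cost)
    return dp[m][n]

assert edit_distance("kitten", "sitting") == 3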
| 298
| 1
|
"""simple docstring"""
def SCREAMING_SNAKE_CASE__ ( num : int )-> bool:
    '''simple docstring'''
    if num < 0:
        return False
    num_copy = num
    rev_num = 0
    while num > 0:
        rev_num = rev_num * 10 + (num % 10)
        num //= 10
    return num_copy == rev_num
if __name__ == "__main__":
import doctest
doctest.testmod()
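Quick sanity checks for the digit-reversal logic above: 121 reads the same reversed, 123 does not, and negative numbers are rejected outright.

for n in (121, 123, -121, 0):
    rev, m = 0, max(n, 0)
    while m > 0:
        rev, m = rev * 10 + m % 10, m // 10
    print(n, n >= 0 and rev == n)  # True, False, False, True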
| 298
|
"""simple docstring"""
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class lowerCAmelCase__(TestCase):
    def _no_encoding_on_file_open(self, file_path: str):
        """Find a non-binary `open(...)` call without an explicit encoding, if any."""
        with open(file_path, encoding="utf-8") as input_file:
            regexp = re.compile(r"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)")
            input_text = input_file.read()
            match = regexp.search(input_text)
        return match

    def _no_print_statements(self, file_path: str):
        """Find an uncommented print statement, if any (comments and docstrings are ignored)."""
        with open(file_path, encoding="utf-8") as input_file:
            regexp = re.compile(r"#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()", re.DOTALL)
            input_text = input_file.read()
            # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
            matches = regexp.finditer(input_text)
            matches = [match for match in matches if match is not None and match.group(1) is not None]
        return matches[0] if matches else None

    def test_no_encoding_on_file_open(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))
        for dataset in dataset_files:
            if self._no_encoding_on_file_open(str(dataset)):
                raise AssertionError(f"open(...) must use utf-8 encoding in {dataset}")

    def test_no_print_statements(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))
        for dataset in dataset_files:
            if self._no_print_statements(str(dataset)):
                raise AssertionError(f"print statement found in {dataset}. Use datasets.logger/logging instead.")
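# Added ad-hoc sanity check (a sketch, not part of the test suite): strings the
# no-encoding regex above should and should not flag.
if __name__ == "__main__":
    no_encoding_regexp = re.compile(r"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)")
    assert no_encoding_regexp.search(" open('data.txt')") is not None                 # flagged: no encoding
    assert no_encoding_regexp.search(" open('data.txt', encoding='utf-8')") is None   # ok: explicit encoding
    assert no_encoding_regexp.search(" open('blob.bin', 'rb')") is None               # ok: binary mode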
"""simple docstring"""
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, get_gpu_count, slow
SRC_DIRS = [
os.path.join(os.path.dirname(__file__), dirname)
for dirname in [
"""text-classification""",
"""language-modeling""",
"""summarization""",
"""token-classification""",
"""question-answering""",
]
]
sys.path.extend(SRC_DIRS)
if SRC_DIRS is not None:
import run_clm_flax
import run_flax_glue
import run_flax_ner
import run_mlm_flax
import run_qa
import run_summarization_flax
    import run_t5_mlm_flax
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def SCREAMING_SNAKE_CASE__ ( )-> Optional[Any]:
'''simple docstring'''
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f
def get_results(output_dir, split="eval"):
    """Load `{split}_results.json` written into output_dir by the example scripts."""
    path = os.path.join(output_dir, f"{split}_results.json")
    if os.path.exists(path):
        with open(path, "r") as f:
            return json.load(f)
    raise ValueError(f"can't find {path}")
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class lowerCAmelCase__(TestCasePlus):
    def test_run_glue(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'\n run_glue.py\n --model_name_or_path distilbert-base-uncased\n --output_dir {tmp_dir}\n --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --learning_rate=1e-4\n --eval_steps=2\n --warmup_steps=2\n --seed=42\n --max_seq_length=128\n '.split()
        with patch.object(sys, "argv", testargs):
            run_flax_glue.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.75)

    @slow
    def test_run_clm(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'\n run_clm_flax.py\n --model_name_or_path distilgpt2\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --do_train\n --do_eval\n --block_size 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --num_train_epochs 2\n --logging_steps 2 --eval_steps 2\n --output_dir {tmp_dir}\n --overwrite_output_dir\n '.split()
        with patch.object(sys, "argv", testargs):
            run_clm_flax.main()
            result = get_results(tmp_dir)
            self.assertLess(result["eval_perplexity"], 100)

    @slow
    def test_run_summarization(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'\n run_summarization.py\n --model_name_or_path t5-small\n --train_file tests/fixtures/tests_samples/xsum/sample.json\n --validation_file tests/fixtures/tests_samples/xsum/sample.json\n --test_file tests/fixtures/tests_samples/xsum/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --num_train_epochs=3\n --warmup_steps=8\n --do_train\n --do_eval\n --do_predict\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --predict_with_generate\n '.split()
        with patch.object(sys, "argv", testargs):
            run_summarization_flax.main()
            result = get_results(tmp_dir, split="test")
            self.assertGreaterEqual(result["test_rouge1"], 10)
            self.assertGreaterEqual(result["test_rouge2"], 2)
            self.assertGreaterEqual(result["test_rougeL"], 7)
            self.assertGreaterEqual(result["test_rougeLsum"], 7)

    @slow
    def test_run_mlm(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'\n run_mlm.py\n --model_name_or_path distilroberta-base\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --max_seq_length 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --logging_steps 2 --eval_steps 2\n --do_train\n --do_eval\n --num_train_epochs=1\n '.split()
        with patch.object(sys, "argv", testargs):
            run_mlm_flax.main()
            result = get_results(tmp_dir)
            self.assertLess(result["eval_perplexity"], 42)

    @slow
    def test_run_t5_mlm(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'\n run_t5_mlm_flax.py\n --model_name_or_path t5-small\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --do_train\n --do_eval\n --max_seq_length 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --num_train_epochs 2\n --logging_steps 2 --eval_steps 2\n --output_dir {tmp_dir}\n --overwrite_output_dir\n '.split()
        with patch.object(sys, "argv", testargs):
            run_t5_mlm_flax.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.42)

    @slow
    def test_run_ner(self):
        # with so little data distributed training needs more epochs to get the score on par with 0/1 gpu
        epochs = 7 if get_gpu_count() > 1 else 2
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'\n run_flax_ner.py\n --model_name_or_path bert-base-uncased\n --train_file tests/fixtures/tests_samples/conll/sample.json\n --validation_file tests/fixtures/tests_samples/conll/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --do_train\n --do_eval\n --warmup_steps=2\n --learning_rate=2e-4\n --logging_steps 2 --eval_steps 2\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=2\n --num_train_epochs={epochs}\n --seed 7\n '.split()
        with patch.object(sys, "argv", testargs):
            run_flax_ner.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.75)
            self.assertGreaterEqual(result["eval_f1"], 0.3)

    @slow
    def test_run_qa(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'\n run_qa.py\n --model_name_or_path bert-base-uncased\n --version_2_with_negative\n --train_file tests/fixtures/tests_samples/SQUAD/sample.json\n --validation_file tests/fixtures/tests_samples/SQUAD/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --num_train_epochs=3\n --warmup_steps=2\n --do_train\n --do_eval\n --logging_steps 2 --eval_steps 2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n '.split()
        with patch.object(sys, "argv", testargs):
            run_qa.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_f1"], 30)
            self.assertGreaterEqual(result["eval_exact"], 30)
"""simple docstring"""
import numpy as np
import datasets
_lowerCAmelCase : Optional[int] = """
Compute the Mahalanobis Distance
Mahalonobis distance is the distance between a point and a distribution.
And not between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance.
It was introduced by Prof. P. C. Mahalanobis in 1936
and has been used in various statistical applications ever since
[source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/]
"""
_lowerCAmelCase : Tuple = """\
@article{de2000mahalanobis,
title={The mahalanobis distance},
author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L},
journal={Chemometrics and intelligent laboratory systems},
volume={50},
number={1},
pages={1--18},
year={2000},
publisher={Elsevier}
}
"""
_lowerCAmelCase : Optional[int] = """
Args:
X: List of datapoints to be compared with the `reference_distribution`.
reference_distribution: List of datapoints from the reference distribution we want to compare to.
Returns:
mahalanobis: The Mahalonobis distance for each datapoint in `X`.
Examples:
>>> mahalanobis_metric = datasets.load_metric(\"mahalanobis\")
>>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]])
>>> print(results)
{'mahalanobis': array([0.5])}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCAmelCase__ ( datasets.Metric ):
    def _info(self):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"X": datasets.Sequence(datasets.Value("float" , id="sequence" ) , id="X" ),
} ) , )
    def _compute(self, X, reference_distribution):
        # convert to numpy arrays
        X = np.array(X)
        reference_distribution = np.array(reference_distribution)
        # Assert that arrays are 2D
        if len(X.shape) != 2:
            raise ValueError("Expected `X` to be a 2D vector")
        if len(reference_distribution.shape) != 2:
            raise ValueError("Expected `reference_distribution` to be a 2D vector")
        if reference_distribution.shape[0] < 2:
            raise ValueError(
                "Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension")
        # Get mahalanobis distance for each prediction
        X_minus_mu = X - np.mean(reference_distribution)
        cov = np.cov(reference_distribution.T)
        try:
            inv_covmat = np.linalg.inv(cov)
        except np.linalg.LinAlgError:
            inv_covmat = np.linalg.pinv(cov)
        left_term = np.dot(X_minus_mu, inv_covmat)
        mahal_dist = np.dot(left_term, X_minus_mu.T).diagonal()
        return {"mahalanobis": mahal_dist}
"""simple docstring"""
# flake8: noqa
# Lint as: python3
from typing import Dict, List, Optional, Type
from .. import config
from ..utils import logging
from .formatting import (
ArrowFormatter,
CustomFormatter,
Formatter,
PandasFormatter,
PythonFormatter,
TensorFormatter,
format_table,
query_table,
)
from .np_formatter import NumpyFormatter
logger = logging.get_logger(__name__)

_FORMAT_TYPES: Dict[Optional[str], Type[Formatter]] = {}
_FORMAT_TYPES_ALIASES: Dict[Optional[str], str] = {}
_FORMAT_TYPES_ALIASES_UNAVAILABLE: Dict[Optional[str], Exception] = {}
def _register_formatter(
    formatter_cls: type,
    format_type: Optional[str],
    aliases: Optional[List[str]] = None,
) -> None:
    """Register a Formatter subclass under a format type and its aliases."""
    aliases = aliases if aliases is not None else []
    if format_type in _FORMAT_TYPES:
        logger.warning(
            f"Overwriting format type '{format_type}' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})")
    _FORMAT_TYPES[format_type] = formatter_cls
    for alias in set(aliases + [format_type]):
        if alias in _FORMAT_TYPES_ALIASES:
            logger.warning(
                f"Overwriting format type alias '{alias}' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})")
        _FORMAT_TYPES_ALIASES[alias] = format_type


def _register_unavailable_formatter(
    unavailable_error: Exception, format_type: Optional[str], aliases: Optional[List[str]] = None
) -> None:
    """Register the error to raise when an optional formatter's backend is not installed."""
    aliases = aliases if aliases is not None else []
    for alias in set(aliases + [format_type]):
        _FORMAT_TYPES_ALIASES_UNAVAILABLE[alias] = unavailable_error
# Here we define all the available formatting functions that can be used by `Dataset.set_format`
_register_formatter(PythonFormatter, None, aliases=["""python"""])
_register_formatter(ArrowFormatter, """arrow""", aliases=["""pa""", """pyarrow"""])
_register_formatter(NumpyFormatter, """numpy""", aliases=["""np"""])
_register_formatter(PandasFormatter, """pandas""", aliases=["""pd"""])
_register_formatter(CustomFormatter, """custom""")
if config.TORCH_AVAILABLE:
from .torch_formatter import TorchFormatter
_register_formatter(TorchFormatter, """torch""", aliases=["""pt""", """pytorch"""])
else:
    _torch_error = ValueError("""PyTorch needs to be installed to be able to return PyTorch tensors.""")
_register_unavailable_formatter(_torch_error, """torch""", aliases=["""pt""", """pytorch"""])
if config.TF_AVAILABLE:
from .tf_formatter import TFFormatter
_register_formatter(TFFormatter, """tensorflow""", aliases=["""tf"""])
else:
    _tf_error = ValueError("""Tensorflow needs to be installed to be able to return Tensorflow tensors.""")
_register_unavailable_formatter(_tf_error, """tensorflow""", aliases=["""tf"""])
if config.JAX_AVAILABLE:
from .jax_formatter import JaxFormatter
_register_formatter(JaxFormatter, """jax""", aliases=[])
else:
    _jax_error = ValueError("""JAX needs to be installed to be able to return JAX arrays.""")
_register_unavailable_formatter(_jax_error, """jax""", aliases=[])
def get_format_type_from_alias(format_type: Optional[str]) -> Optional[str]:
    """Resolve an alias (e.g. "np") to its canonical format type (e.g. "numpy")."""
    if format_type in _FORMAT_TYPES_ALIASES:
        return _FORMAT_TYPES_ALIASES[format_type]
    else:
        return format_type


def get_formatter(format_type: Optional[str], **format_kwargs) -> Formatter:
    """Instantiate the Formatter registered for `format_type`, raising if its backend is unavailable."""
    format_type = get_format_type_from_alias(format_type)
    if format_type in _FORMAT_TYPES:
        return _FORMAT_TYPES[format_type](**format_kwargs)
    if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE:
        raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type]
    else:
        raise ValueError(
            f"Return type should be None or selected in {list(type for type in _FORMAT_TYPES.keys() if type is not None)}, but got '{format_type}'")
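# Added usage sketch (hypothetical formatter, not part of this module): new
# format types are plugged into the registry above via _register_formatter.
if __name__ == "__main__":
    class ToyFormatter(PythonFormatter):
        """Hypothetical formatter that simply reuses the python formatting logic."""

    _register_formatter(ToyFormatter, "toy", aliases=["demo"])
    assert get_format_type_from_alias("demo") == "toy"
    assert isinstance(get_formatter("toy"), ToyFormatter)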
"""simple docstring"""
import gc
import random
import unittest
import torch
from diffusers import (
    IFImg2ImgPipeline,
    IFImg2ImgSuperResolutionPipeline,
    IFInpaintingPipeline,
    IFInpaintingSuperResolutionPipeline,
    IFPipeline,
    IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class lowerCAmelCase__(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFPipeline
    params = TEXT_TO_IMAGE_PARAMS - {"width", "height", "latents"}
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    def get_dummy_components(self):
        return self._get_dummy_components()
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()
@unittest.skipIf(torch_device != "cuda" , reason="float16 requires CUDA" )
    def test_save_load_float16(self):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1e-1)
    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)
    def test_save_load_local(self):
        self._test_save_load_local()
    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)
@slow
@require_torch_gpu
class lowerCAmelCase__ ( unittest.TestCase ):
    def tearDown(self):
        # clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_all(self):
        # if
        pipe_1 = IFPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0", variant="fp16", torch_dtype=torch.float16)
        pipe_2 = IFSuperResolutionPipeline.from_pretrained(
            "DeepFloyd/IF-II-L-v1.0", variant="fp16", torch_dtype=torch.float16, text_encoder=None, tokenizer=None)
        # pre compute text embeddings and remove T5 to save memory
        pipe_1.text_encoder.to("cuda")
        prompt_embeds, negative_prompt_embeds = pipe_1.encode_prompt("anime turtle", device="cuda")
        del pipe_1.tokenizer
        del pipe_1.text_encoder
        gc.collect()
        pipe_1.tokenizer = None
        pipe_1.text_encoder = None
        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()
        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())
        self._test_if(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)
        pipe_1.remove_all_hooks()
        pipe_2.remove_all_hooks()
        # img2img
        pipe_1 = IFImg2ImgPipeline(**pipe_1.components)
        pipe_2 = IFImg2ImgSuperResolutionPipeline(**pipe_2.components)
        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()
        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())
        self._test_if_img2img(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)
        pipe_1.remove_all_hooks()
        pipe_2.remove_all_hooks()
        # inpainting
        pipe_1 = IFInpaintingPipeline(**pipe_1.components)
        pipe_2 = IFInpaintingSuperResolutionPipeline(**pipe_2.components)
        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()
        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())
        self._test_if_inpainting(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)
    def _test_if(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
'''simple docstring'''
# pipeline 1
_start_torch_memory_measurement()
UpperCAmelCase__ : List[str] = torch.Generator(device="cpu" ).manual_seed(0 )
UpperCAmelCase__ : Dict = pipe_a(
prompt_embeds=snake_case__ , negative_prompt_embeds=snake_case__ , num_inference_steps=2 , generator=snake_case__ , output_type="np" , )
UpperCAmelCase__ : List[Any] = output.images[0]
assert image.shape == (6_4, 6_4, 3)
UpperCAmelCase__ : Optional[Any] = torch.cuda.max_memory_allocated()
assert mem_bytes < 1_3 * 1_0**9
UpperCAmelCase__ : str = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy" )
assert_mean_pixel_difference(snake_case__ , snake_case__ )
# pipeline 2
_start_torch_memory_measurement()
UpperCAmelCase__ : Optional[int] = torch.Generator(device="cpu" ).manual_seed(0 )
UpperCAmelCase__ : Dict = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(0 ) ).to(snake_case__ )
UpperCAmelCase__ : str = pipe_a(
prompt_embeds=snake_case__ , negative_prompt_embeds=snake_case__ , image=snake_case__ , generator=snake_case__ , num_inference_steps=2 , output_type="np" , )
UpperCAmelCase__ : Union[str, Any] = output.images[0]
assert image.shape == (2_5_6, 2_5_6, 3)
UpperCAmelCase__ : List[str] = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 1_0**9
UpperCAmelCase__ : Dict = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy" )
assert_mean_pixel_difference(snake_case__ , snake_case__ )
    def _test_if_img2img(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
'''simple docstring'''
# pipeline 1
_start_torch_memory_measurement()
UpperCAmelCase__ : List[str] = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(0 ) ).to(snake_case__ )
UpperCAmelCase__ : int = torch.Generator(device="cpu" ).manual_seed(0 )
UpperCAmelCase__ : Tuple = pipe_a(
prompt_embeds=snake_case__ , negative_prompt_embeds=snake_case__ , image=snake_case__ , num_inference_steps=2 , generator=snake_case__ , output_type="np" , )
UpperCAmelCase__ : str = output.images[0]
assert image.shape == (6_4, 6_4, 3)
UpperCAmelCase__ : Optional[Any] = torch.cuda.max_memory_allocated()
assert mem_bytes < 1_0 * 1_0**9
UpperCAmelCase__ : List[str] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy" )
assert_mean_pixel_difference(snake_case__ , snake_case__ )
# pipeline 2
_start_torch_memory_measurement()
UpperCAmelCase__ : int = torch.Generator(device="cpu" ).manual_seed(0 )
UpperCAmelCase__ : Optional[int] = floats_tensor((1, 3, 2_5_6, 2_5_6) , rng=random.Random(0 ) ).to(snake_case__ )
UpperCAmelCase__ : Tuple = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(0 ) ).to(snake_case__ )
UpperCAmelCase__ : Dict = pipe_a(
prompt_embeds=snake_case__ , negative_prompt_embeds=snake_case__ , image=snake_case__ , original_image=snake_case__ , generator=snake_case__ , num_inference_steps=2 , output_type="np" , )
UpperCAmelCase__ : Optional[Any] = output.images[0]
assert image.shape == (2_5_6, 2_5_6, 3)
UpperCAmelCase__ : Dict = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 1_0**9
UpperCAmelCase__ : str = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy" )
assert_mean_pixel_difference(snake_case__ , snake_case__ )
    def _test_if_inpainting(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
'''simple docstring'''
# pipeline 1
_start_torch_memory_measurement()
UpperCAmelCase__ : str = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(0 ) ).to(snake_case__ )
UpperCAmelCase__ : Dict = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(1 ) ).to(snake_case__ )
UpperCAmelCase__ : Optional[int] = torch.Generator(device="cpu" ).manual_seed(0 )
UpperCAmelCase__ : int = pipe_a(
prompt_embeds=snake_case__ , negative_prompt_embeds=snake_case__ , image=snake_case__ , mask_image=snake_case__ , num_inference_steps=2 , generator=snake_case__ , output_type="np" , )
UpperCAmelCase__ : int = output.images[0]
assert image.shape == (6_4, 6_4, 3)
UpperCAmelCase__ : Union[str, Any] = torch.cuda.max_memory_allocated()
assert mem_bytes < 1_0 * 1_0**9
UpperCAmelCase__ : int = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy" )
assert_mean_pixel_difference(snake_case__ , snake_case__ )
# pipeline 2
_start_torch_memory_measurement()
UpperCAmelCase__ : int = torch.Generator(device="cpu" ).manual_seed(0 )
UpperCAmelCase__ : Optional[Any] = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(0 ) ).to(snake_case__ )
UpperCAmelCase__ : Optional[int] = floats_tensor((1, 3, 2_5_6, 2_5_6) , rng=random.Random(0 ) ).to(snake_case__ )
UpperCAmelCase__ : List[Any] = floats_tensor((1, 3, 2_5_6, 2_5_6) , rng=random.Random(1 ) ).to(snake_case__ )
UpperCAmelCase__ : Union[str, Any] = pipe_a(
prompt_embeds=snake_case__ , negative_prompt_embeds=snake_case__ , image=snake_case__ , mask_image=snake_case__ , original_image=snake_case__ , generator=snake_case__ , num_inference_steps=2 , output_type="np" , )
UpperCAmelCase__ : Tuple = output.images[0]
assert image.shape == (2_5_6, 2_5_6, 3)
UpperCAmelCase__ : List[str] = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 1_0**9
UpperCAmelCase__ : List[Any] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy" )
assert_mean_pixel_difference(snake_case__ , snake_case__ )
def _start_torch_memory_measurement() -> None:
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
"""simple docstring"""
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
class lowerCAmelCase__(BaseImageProcessor):
    model_input_names = ["pixel_values"]
    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)
    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)
    def rescale(self, image: np.ndarray, scale: float, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)
    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray.")
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
    def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] = None):
        logits = outputs.logits
        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits")
            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()
            semantic_segmentation = []
            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False)
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
        return semantic_segmentation
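# Added usage sketch (not from the original file; values illustrative): the
# preprocess pipeline above runs resize -> center_crop -> rescale -> normalize
# and returns a channels-first batch, assuming the default 256/224 sizes.
if __name__ == "__main__":
    _demo_processor = lowerCAmelCase__()
    _demo_image = np.random.randint(0, 256, (480, 640, 3), dtype=np.uint8)
    _demo_batch = _demo_processor.preprocess(_demo_image, return_tensors="np")
    assert _demo_batch["pixel_values"].shape == (1, 3, 224, 224)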
"""simple docstring"""
import os
import re
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
"""vocab_file""": """vocab.txt""",
"""merges_file""": """bpe.codes""",
}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""vinai/phobert-base""": """https://huggingface.co/vinai/phobert-base/resolve/main/vocab.txt""",
"""vinai/phobert-large""": """https://huggingface.co/vinai/phobert-large/resolve/main/vocab.txt""",
},
"""merges_file""": {
"""vinai/phobert-base""": """https://huggingface.co/vinai/phobert-base/resolve/main/bpe.codes""",
"""vinai/phobert-large""": """https://huggingface.co/vinai/phobert-large/resolve/main/bpe.codes""",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""vinai/phobert-base""": 256,
"""vinai/phobert-large""": 256,
}
def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    pairs = set(pairs)
    return pairs
class lowerCAmelCase__(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    def __init__(
        self,
        vocab_file,
        merges_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        **kwargs,
    ):
        super().__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, **kwargs)
        self.vocab_file = vocab_file
        self.merges_file = merges_file
        self.encoder = {}
        self.encoder[self.bos_token] = 0
        self.encoder[self.pad_token] = 1
        self.encoder[self.eos_token] = 2
        self.encoder[self.unk_token] = 3
        self.add_from_file(vocab_file)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            merges = merges_handle.read().split("\n")[:-1]
        merges = [tuple(merge.split()[:-1]) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    @property
    def vocab_size(self):
        return len(self.encoder)
    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
        pairs = get_pairs(word)
        if not pairs:
            return token
        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j
                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = "@@ ".join(word)
        word = word[:-4]
        self.cache[token] = word
        return word
    def _tokenize(self, text):
        split_tokens = []
        words = re.findall(r"\S+\n?", text)
        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens
    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))
    def _convert_id_to_token(self, index):
        return self.decoder.get(index, self.unk_token)
    def convert_tokens_to_string(self, tokens):
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        out_merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        if os.path.abspath(self.merges_file) != os.path.abspath(out_merge_file):
            copyfile(self.merges_file, out_merge_file)
        return out_vocab_file, out_merge_file
    def add_from_file(self, f):
        """Load a pre-existing dictionary from `f` (path or file object) and add its symbols to this instance."""
        if isinstance(f, str):
            try:
                with open(f, "r", encoding="utf-8") as fd:
                    self.add_from_file(fd)
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception(f"Incorrect encoding detected in {f}, please rebuild the dataset")
            return
        lines = f.readlines()
        for lineTmp in lines:
            line = lineTmp.strip()
            idx = line.rfind(" ")
            if idx == -1:
                raise ValueError("Incorrect dictionary format, expected '<token> <cnt>'")
            word = line[:idx]
            self.encoder[word] = len(self.encoder)
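# Added worked example (a sketch, not from the original file): how get_pairs
# decomposes a word, already suffixed with "</w>", into the adjacent symbol
# pairs that BPE merges are ranked over.
if __name__ == "__main__":
    assert get_pairs(("l", "o", "w", "er</w>")) == {("l", "o"), ("o", "w"), ("w", "er</w>")}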
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImg2ImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class lowerCAmelCase__(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ShapEImg2ImgPipeline
    params = ["image"]
    batch_params = ["image"]
    required_optional_params = [
        "num_images_per_prompt",
        "num_inference_steps",
        "generator",
        "latents",
        "guidance_scale",
        "frame_size",
        "output_type",
        "return_dict",
    ]
    test_gradient_checkpointing = False
@property
    def text_embedder_hidden_size(self):
'''simple docstring'''
return 3_2
@property
    def time_input_dim(self):
'''simple docstring'''
return 3_2
@property
    def time_embed_dim(self):
'''simple docstring'''
return self.time_input_dim * 4
@property
    def renderer_dim(self):
'''simple docstring'''
return 8
@property
    def dummy_image_encoder(self):
'''simple docstring'''
torch.manual_seed(0 )
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size, image_size=64, projection_dim=self.text_embedder_hidden_size, intermediate_size=37, num_attention_heads=4, num_channels=3, num_hidden_layers=5, patch_size=1)
        model = CLIPVisionModel(config)
        return model
@property
    def dummy_image_processor(self):
'''simple docstring'''
        image_processor = CLIPImageProcessor(
            crop_size=224, do_center_crop=True, do_normalize=True, do_resize=True, image_mean=[0.48145466, 0.4578275, 0.40821073], image_std=[0.26862954, 0.26130258, 0.27577711], resample=3, size=224)
        return image_processor
@property
    def dummy_prior(self):
'''simple docstring'''
torch.manual_seed(0 )
        model_kwargs = {
"num_attention_heads": 2,
"attention_head_dim": 1_6,
"embedding_dim": self.time_input_dim,
"num_embeddings": 3_2,
"embedding_proj_dim": self.text_embedder_hidden_size,
"time_embed_dim": self.time_embed_dim,
"num_layers": 1,
"clip_embed_dim": self.time_input_dim * 2,
"additional_embeddings": 0,
"time_embed_act_fn": "gelu",
"norm_in_type": "layer",
"embedding_proj_norm_type": "layer",
"encoder_hid_proj_type": None,
"added_emb_type": None,
}
        model = PriorTransformer(**model_kwargs)
        return model
@property
    def dummy_renderer(self):
'''simple docstring'''
torch.manual_seed(0 )
        model_kwargs = {
"param_shapes": (
(self.renderer_dim, 9_3),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
"d_latent": self.time_input_dim,
"d_hidden": self.renderer_dim,
"n_output": 1_2,
"background": (
0.1,
0.1,
0.1,
),
}
        model = ShapERenderer(**model_kwargs)
        return model
    def get_dummy_components(self):
        prior = self.dummy_prior
        image_encoder = self.dummy_image_encoder
        image_processor = self.dummy_image_processor
        renderer = self.dummy_renderer
        scheduler = HeunDiscreteScheduler(
            beta_schedule="exp", num_train_timesteps=1024, prediction_type="sample", use_karras_sigmas=True, clip_sample=True, clip_sample_range=1.0)
        components = {
"prior": prior,
"image_encoder": image_encoder,
"image_processor": image_processor,
"renderer": renderer,
"scheduler": scheduler,
}
return components
    def get_dummy_inputs(self, device, seed=0):
        input_image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
"image": input_image,
"generator": generator,
"num_inference_steps": 1,
"frame_size": 3_2,
"output_type": "np",
}
return inputs
    def test_shap_e_img2img(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images[0]
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (20, 32, 32, 3)
        expected_slice = np.array(
[
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
    def test_inference_batch_consistent(self):
        # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
        self._test_inference_batch_consistent(batch_sizes=[1, 2])
    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True
        self._test_inference_batch_single_identical(
            batch_size=2, test_max_difference=test_max_difference, relax_max_difference=relax_max_difference)
    def test_num_images_per_prompt(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        batch_size = 1
        num_images_per_prompt = 2
        inputs = self.get_dummy_inputs(torch_device)
        for key in inputs.keys():
            if key in self.batch_params:
                inputs[key] = batch_size * [inputs[key]]
        images = pipe(**inputs, num_images_per_prompt=num_images_per_prompt)[0]
        assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class lowerCAmelCase__ ( unittest.TestCase ):
    def tearDown(self):
        # clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_shap_e_img2img(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/shap_e/corgi.png")
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/shap_e/test_shap_e_img2img_out.npy")
        pipe = ShapEImg2ImgPipeline.from_pretrained("openai/shap-e-img2img")
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        generator = torch.Generator(device=torch_device).manual_seed(0)
        images = pipe(
            input_image, generator=generator, guidance_scale=3.0, num_inference_steps=64, frame_size=64, output_type="np").images[0]
        assert images.shape == (20, 64, 64, 3)
        assert_mean_pixel_difference(images, expected_image)
"""simple docstring"""
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class DDPMSchedulerState:
    common: CommonSchedulerState
    # setable values
    init_noise_sigma: jnp.ndarray
    timesteps: jnp.ndarray
    num_inference_steps: Optional[int] = None

    @classmethod
    def create(cls, common: CommonSchedulerState, init_noise_sigma: jnp.ndarray, timesteps: jnp.ndarray):
        return cls(common=common, init_noise_sigma=init_noise_sigma, timesteps=timesteps)
@dataclass
class FlaxDDPMSchedulerOutput(FlaxSchedulerOutput):
    state: DDPMSchedulerState
class FlaxDDPMScheduler(FlaxSchedulerMixin, ConfigMixin):
    _compatibles = [e.name for e in FlaxKarrasDiffusionSchedulers]

    dtype: jnp.dtype
    @property
    def has_state(self):
        return True
    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        beta_start: float = 0.0001,
        beta_end: float = 0.02,
        beta_schedule: str = "linear",
        trained_betas: Optional[jnp.ndarray] = None,
        variance_type: str = "fixed_small",
        clip_sample: bool = True,
        prediction_type: str = "epsilon",
        dtype: jnp.dtype = jnp.float32,
    ):
        self.dtype = dtype
    def create_state(self, common: Optional[CommonSchedulerState] = None) -> DDPMSchedulerState:
        if common is None:
            common = CommonSchedulerState.create(self)
        # standard deviation of the initial noise distribution
        init_noise_sigma = jnp.array(1.0, dtype=self.dtype)
        timesteps = jnp.arange(0, self.config.num_train_timesteps).round()[::-1]
        return DDPMSchedulerState.create(
            common=common, init_noise_sigma=init_noise_sigma, timesteps=timesteps)
    def scale_model_input(self, state: DDPMSchedulerState, sample: jnp.ndarray, timestep: Optional[int] = None) -> jnp.ndarray:
        return sample
    def set_timesteps(self, state: DDPMSchedulerState, num_inference_steps: int, shape: Tuple = ()) -> DDPMSchedulerState:
        step_ratio = self.config.num_train_timesteps // num_inference_steps
        # creates integer timesteps by multiplying by ratio
        # rounding to avoid issues when num_inference_step is power of 3
        timesteps = (jnp.arange(0, num_inference_steps) * step_ratio).round()[::-1]
        return state.replace(
            num_inference_steps=num_inference_steps, timesteps=timesteps)
    def _get_variance(self, state: DDPMSchedulerState, t, predicted_variance=None, variance_type=None):
        alpha_prod_t = state.common.alphas_cumprod[t]
        alpha_prod_t_prev = jnp.where(t > 0, state.common.alphas_cumprod[t - 1], jnp.array(1.0, dtype=self.dtype))
        # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
        # and sample from it to get previous sample
        # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
        variance = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]
        if variance_type is None:
            variance_type = self.config.variance_type
        # hacks - were probably added for training stability
        if variance_type == "fixed_small":
            variance = jnp.clip(variance, a_min=1e-20)
        # for rl-diffuser https://arxiv.org/abs/2205.09991
        elif variance_type == "fixed_small_log":
            variance = jnp.log(jnp.clip(variance, a_min=1e-20))
        elif variance_type == "fixed_large":
            variance = state.common.betas[t]
        elif variance_type == "fixed_large_log":
            # Glide max_log
            variance = jnp.log(state.common.betas[t])
        elif variance_type == "learned":
            return predicted_variance
        elif variance_type == "learned_range":
            min_log = variance
            max_log = state.common.betas[t]
            frac = (predicted_variance + 1) / 2
            variance = frac * max_log + (1 - frac) * min_log
        return variance
    def step(
        self,
        state: DDPMSchedulerState,
        model_output: jnp.ndarray,
        timestep: int,
        sample: jnp.ndarray,
        key: Optional[jax.random.KeyArray] = None,
        return_dict: bool = True,
    ) -> Union[FlaxDDPMSchedulerOutput, Tuple]:
        t = timestep
        if key is None:
            key = jax.random.PRNGKey(0)
        if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
            model_output, predicted_variance = jnp.split(model_output, sample.shape[1], axis=1)
        else:
            predicted_variance = None
        # 1. compute alphas, betas
        alpha_prod_t = state.common.alphas_cumprod[t]
        alpha_prod_t_prev = jnp.where(t > 0, state.common.alphas_cumprod[t - 1], jnp.array(1.0, dtype=self.dtype))
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev
        # 2. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        elif self.config.prediction_type == "v_prediction":
            pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` "
                "or `v_prediction` for the FlaxDDPMScheduler.")
        # 3. Clip "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = jnp.clip(pred_original_sample, -1, 1)
        # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
        current_sample_coeff = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
        # 5. Compute predicted previous sample µ_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
        # 6. Add noise
        def random_variance():
            split_key = jax.random.split(key, num=1)
            noise = jax.random.normal(split_key, shape=model_output.shape, dtype=self.dtype)
            return (self._get_variance(state, t, predicted_variance=predicted_variance) ** 0.5) * noise
        variance = jnp.where(t > 0, random_variance(), jnp.zeros(model_output.shape, dtype=self.dtype))
        pred_prev_sample = pred_prev_sample + variance
        if not return_dict:
            return (pred_prev_sample, state)
        return FlaxDDPMSchedulerOutput(prev_sample=pred_prev_sample, state=state)
    def add_noise(self, state: DDPMSchedulerState, original_samples: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray) -> jnp.ndarray:
        return add_noise_common(state.common, original_samples, noise, timesteps)

    def get_velocity(self, state: DDPMSchedulerState, sample: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray) -> jnp.ndarray:
        return get_velocity_common(state.common, sample, noise, timesteps)
def __len__( self : Union[str, Any] ):
'''simple docstring'''
return self.config.num_train_timesteps
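# Illustrative denoising-loop sketch for this scheduler (variable names below are assumptions):
#   state = scheduler.create_state()
#   state = scheduler.set_timesteps(state, num_inference_steps, sample.shape)
#   for t in state.timesteps:
#       out = scheduler.step(state, model_output, t, sample, key)
#       sample, state = out.prev_sample, out.state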
"""simple docstring"""
import re
import tempfile
from pathlib import Path
import pytest
import yaml
from datasets.utils.readme import ReadMe
# @pytest.fixture
# def example_yaml_structure():
_lowerCAmelCase : List[str] = yaml.safe_load(
"""\
name: \"\"
allow_empty: false
allow_empty_text: true
subsections:
- name: \"Dataset Card for X\" # First-level markdown heading
allow_empty: false
allow_empty_text: true
subsections:
- name: \"Table of Contents\"
allow_empty: false
allow_empty_text: false
subsections: null
- name: \"Dataset Description\"
allow_empty: false
allow_empty_text: false
subsections:
- name: \"Dataset Summary\"
allow_empty: false
allow_empty_text: false
subsections: null
- name: \"Supported Tasks and Leaderboards\"
allow_empty: true
allow_empty_text: true
subsections: null
- name: Languages
allow_empty: false
allow_empty_text: true
subsections: null
"""
)
_lowerCAmelCase : Union[str, Any] = {
"""name""": """root""",
"""text""": """""",
"""is_empty_text""": True,
"""subsections""": [
{
"""name""": """Dataset Card for My Dataset""",
"""text""": """""",
"""is_empty_text""": True,
"""subsections""": [
{"""name""": """Table of Contents""", """text""": """Some text here.""", """is_empty_text""": False, """subsections""": []},
{
"""name""": """Dataset Description""",
"""text""": """Some text here.""",
"""is_empty_text""": False,
"""subsections""": [
{
"""name""": """Dataset Summary""",
"""text""": """Some text here.""",
"""is_empty_text""": False,
"""subsections""": [],
},
{
"""name""": """Supported Tasks and Leaderboards""",
"""text""": """""",
"""is_empty_text""": True,
"""subsections""": [],
},
{"""name""": """Languages""", """text""": """Language Text""", """is_empty_text""": False, """subsections""": []},
],
},
],
}
],
}
_lowerCAmelCase : Any = """\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
"""
_lowerCAmelCase : List[Any] = """\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
#### Extra Ignored Subsection
### Supported Tasks and Leaderboards
### Languages
Language Text
"""
_lowerCAmelCase : int = {
"""name""": """root""",
"""text""": """""",
"""is_empty_text""": True,
"""subsections""": [
{
"""name""": """Dataset Card for My Dataset""",
"""text""": """""",
"""is_empty_text""": True,
"""subsections""": [
{"""name""": """Table of Contents""", """text""": """Some text here.""", """is_empty_text""": False, """subsections""": []},
{
"""name""": """Dataset Description""",
"""text""": """Some text here.""",
"""is_empty_text""": False,
"""subsections""": [
{
"""name""": """Dataset Summary""",
"""text""": """Some text here.""",
"""is_empty_text""": False,
"""subsections""": [
{
"""name""": """Extra Ignored Subsection""",
"""text""": """""",
"""is_empty_text""": True,
"""subsections""": [],
}
],
},
{
"""name""": """Supported Tasks and Leaderboards""",
"""text""": """""",
"""is_empty_text""": True,
"""subsections""": [],
},
{"""name""": """Languages""", """text""": """Language Text""", """is_empty_text""": False, """subsections""": []},
],
},
],
}
],
}
_lowerCAmelCase : Tuple = """\
---
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
"""
_lowerCAmelCase : Any = (
"""The following issues were found for the README at `{path}`:\n-\tEmpty YAML markers are present in the README."""
)
_lowerCAmelCase : Optional[int] = """\
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
"""
_lowerCAmelCase : List[str] = (
"""The following issues were found for the README at `{path}`:\n-\tNo YAML markers are present in the README."""
)
_lowerCAmelCase : List[Any] = """\
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
"""
_lowerCAmelCase : Dict = """The following issues were found for the README at `{path}`:\n-\tOnly the start of YAML tags present in the README."""
_lowerCAmelCase : Optional[int] = """\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
### Supported Tasks and Leaderboards
### Languages
Language Text
"""
_lowerCAmelCase : int = """The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Summary` but it is empty.\n-\tExpected some text in section `Dataset Summary` but it is empty (text in subsections are ignored)."""
_lowerCAmelCase : Union[str, Any] = """\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
"""
_lowerCAmelCase : Dict = """The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Card for My Dataset` but it is empty.\n-\tSection `Dataset Card for My Dataset` expected the following subsections: `Table of Contents`, `Dataset Description`. Found 'None'."""
_lowerCAmelCase : Union[str, Any] = """\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Languages
Language Text
"""
_lowerCAmelCase : Optional[Any] = """The following issues were found for the README at `{path}`:\n-\tSection `Dataset Description` is missing subsection: `Supported Tasks and Leaderboards`."""
_lowerCAmelCase : Tuple = """\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
"""
_lowerCAmelCase : Dict = """The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Languages` but it is empty."""
_lowerCAmelCase : int = """\
---
language:
- zh
- en
---
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
"""
_lowerCAmelCase : str = """The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README."""
_lowerCAmelCase : int = """\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
# Dataset Card My Dataset
"""
_lowerCAmelCase : Any = """The following issues were found for the README at `{path}`:\n-\tThe README has several first-level headings: `Dataset Card for My Dataset`, `Dataset Card My Dataset`. Only one heading is expected. Skipping further validation for this README."""
_lowerCAmelCase : Dict = """\
---
language:
- zh
- en
---
# Dataset Card My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
"""
_lowerCAmelCase : Optional[int] = """The following issues were found for the README at `{path}`:\n-\tNo first-level heading starting with `Dataset Card for` found in README. Skipping further validation for this README."""
_lowerCAmelCase : int = """"""
_lowerCAmelCase : List[str] = """The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.\n-\tNo YAML markers are present in the README."""
_lowerCAmelCase : int = """\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
"""
_lowerCAmelCase : Tuple = """The following issues were found while parsing the README at `{path}`:\n-\tMultiple sections with the same heading `Dataset Card for My Dataset` have been found. Please keep only one of these sections."""
@pytest.mark.parametrize(
"readme_md, expected_dict" , [
(README_CORRECT, CORRECT_DICT),
(README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
] , )
def SCREAMING_SNAKE_CASE__ ( snake_case : List[Any] , snake_case : Dict )-> str:
'''simple docstring'''
assert ReadMe.from_string(snake_case , snake_case ).to_dict() == expected_dict
@pytest.mark.parametrize(
"readme_md, expected_error" , [
(README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
(README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
(README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
(README_EMPTY, EXPECTED_ERROR_README_EMPTY),
(README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
(README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
(README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
(README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
(README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
(README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
(README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
] , )
def SCREAMING_SNAKE_CASE__ ( snake_case : str , snake_case : Optional[Any] )-> Dict:
'''simple docstring'''
with pytest.raises(snake_case , match=re.escape(expected_error.format(path="root" ) ) ):
UpperCAmelCase__ : Optional[int] = ReadMe.from_string(snake_case , snake_case )
readme.validate()
@pytest.mark.parametrize(
"readme_md, expected_error" , [
(README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
] , )
def SCREAMING_SNAKE_CASE__ ( snake_case : Optional[int] , snake_case : List[Any] )-> List[str]:
'''simple docstring'''
with pytest.raises(snake_case , match=re.escape(expected_error.format(path="root" ) ) ):
ReadMe.from_string(snake_case , snake_case )
@pytest.mark.parametrize(
"readme_md," , [
(README_MULTIPLE_SAME_HEADING_1),
] , )
def SCREAMING_SNAKE_CASE__ ( snake_case : Optional[Any] )-> List[str]:
'''simple docstring'''
ReadMe.from_string(snake_case , snake_case , suppress_parsing_errors=snake_case )
@pytest.mark.parametrize(
"readme_md, expected_dict" , [
(README_CORRECT, CORRECT_DICT),
(README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
] , )
def SCREAMING_SNAKE_CASE__ ( snake_case : List[Any] , snake_case : Optional[Any] )-> Optional[int]:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCAmelCase__ : Tuple = Path(snake_case ) / "README.md"
with open(snake_case , "w+" ) as readme_file:
readme_file.write(snake_case )
UpperCAmelCase__ : Tuple = ReadMe.from_readme(snake_case , snake_case ).to_dict()
assert out["name"] == path
assert out["text"] == ""
assert out["is_empty_text"]
assert out["subsections"] == expected_dict["subsections"]
@pytest.mark.parametrize(
"readme_md, expected_error" , [
(README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
(README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
(README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
(README_EMPTY, EXPECTED_ERROR_README_EMPTY),
(README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
(README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
(README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
(README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
(README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
(README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
(README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
] , )
def SCREAMING_SNAKE_CASE__ ( snake_case : Optional[Any] , snake_case : Dict )-> Optional[Any]:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCAmelCase__ : List[str] = Path(snake_case ) / "README.md"
with open(snake_case , "w+" ) as readme_file:
readme_file.write(snake_case )
UpperCAmelCase__ : Tuple = expected_error.format(path=snake_case )
with pytest.raises(snake_case , match=re.escape(snake_case ) ):
UpperCAmelCase__ : Union[str, Any] = ReadMe.from_readme(snake_case , snake_case )
readme.validate()
@pytest.mark.parametrize(
"readme_md, expected_error" , [
(README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
] , )
def SCREAMING_SNAKE_CASE__ ( snake_case : List[Any] , snake_case : Optional[Any] )-> Any:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCAmelCase__ : Any = Path(snake_case ) / "README.md"
with open(snake_case , "w+" ) as readme_file:
readme_file.write(snake_case )
UpperCAmelCase__ : Tuple = expected_error.format(path=snake_case )
with pytest.raises(snake_case , match=re.escape(snake_case ) ):
ReadMe.from_readme(snake_case , snake_case )
@pytest.mark.parametrize(
"readme_md," , [
(README_MULTIPLE_SAME_HEADING_1),
] , )
def SCREAMING_SNAKE_CASE__ ( snake_case : Optional[Any] )-> Optional[int]:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCAmelCase__ : str = Path(snake_case ) / "README.md"
with open(snake_case , "w+" ) as readme_file:
readme_file.write(snake_case )
ReadMe.from_readme(snake_case , snake_case , suppress_parsing_errors=snake_case )
"""simple docstring"""
import inspect
import unittest
from transformers import DecisionTransformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import DecisionTransformerModel
from transformers.models.decision_transformer.modeling_decision_transformer import (
DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
class lowerCAmelCase__ :
def __init__( self : str , snake_case__ : Optional[Any] , snake_case__ : List[Any]=1_3 , snake_case__ : str=7 , snake_case__ : Optional[int]=6 , snake_case__ : Union[str, Any]=1_7 , snake_case__ : Optional[Any]=2_3 , snake_case__ : int=1_1 , snake_case__ : Dict=True , ):
'''simple docstring'''
UpperCAmelCase__ : str = parent
UpperCAmelCase__ : Tuple = batch_size
UpperCAmelCase__ : Dict = seq_length
UpperCAmelCase__ : Union[str, Any] = act_dim
UpperCAmelCase__ : Dict = state_dim
UpperCAmelCase__ : Optional[Any] = hidden_size
UpperCAmelCase__ : List[str] = max_length
UpperCAmelCase__ : int = is_training
def __a ( self : int ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = floats_tensor((self.batch_size, self.seq_length, self.state_dim) )
UpperCAmelCase__ : List[Any] = floats_tensor((self.batch_size, self.seq_length, self.act_dim) )
UpperCAmelCase__ : Union[str, Any] = floats_tensor((self.batch_size, self.seq_length, 1) )
UpperCAmelCase__ : Optional[int] = floats_tensor((self.batch_size, self.seq_length, 1) )
UpperCAmelCase__ : int = ids_tensor((self.batch_size, self.seq_length) , vocab_size=1_0_0_0 )
UpperCAmelCase__ : Optional[int] = random_attention_mask((self.batch_size, self.seq_length) )
UpperCAmelCase__ : Optional[int] = self.get_config()
return (
config,
states,
actions,
rewards,
returns_to_go,
timesteps,
attention_mask,
)
def __a ( self : int ):
'''simple docstring'''
return DecisionTransformerConfig(
batch_size=self.batch_size , seq_length=self.seq_length , act_dim=self.act_dim , state_dim=self.state_dim , hidden_size=self.hidden_size , max_length=self.max_length , )
def __a ( self : Optional[Any] , snake_case__ : Any , snake_case__ : List[str] , snake_case__ : Tuple , snake_case__ : Union[str, Any] , snake_case__ : List[Any] , snake_case__ : Any , snake_case__ : Optional[int] , ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = DecisionTransformerModel(config=snake_case__ )
model.to(snake_case__ )
model.eval()
UpperCAmelCase__ : Dict = model(snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ )
self.parent.assertEqual(result.state_preds.shape , states.shape )
self.parent.assertEqual(result.action_preds.shape , actions.shape )
self.parent.assertEqual(result.return_preds.shape , returns_to_go.shape )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.seq_length * 3, self.hidden_size) ) # seq length * 3 as there are 3 modalities: states, returns and actions
def __a ( self : int ):
'''simple docstring'''
config_and_inputs = self.prepare_config_and_inputs()
(
config,
states,
actions,
rewards,
returns_to_go,
timesteps,
attention_mask,
) = config_and_inputs
inputs_dict = {
"states": states,
"actions": actions,
"rewards": rewards,
"returns_to_go": returns_to_go,
"timesteps": timesteps,
"attention_mask": attention_mask,
}
return config, inputs_dict
@require_torch
class lowerCAmelCase__ ( __magic_name__ , __magic_name__ , __magic_name__ , unittest.TestCase ):
SCREAMING_SNAKE_CASE_ =(DecisionTransformerModel,) if is_torch_available() else ()
SCREAMING_SNAKE_CASE_ =()
SCREAMING_SNAKE_CASE_ ={'''feature-extraction''': DecisionTransformerModel} if is_torch_available() else {}
# Ignore a failing test from GenerationTesterMixin, as the model does not use input_ids
SCREAMING_SNAKE_CASE_ =False
# Ignore failing tests from ModelTesterMixin, as the model does not implement these features
SCREAMING_SNAKE_CASE_ =False
SCREAMING_SNAKE_CASE_ =False
SCREAMING_SNAKE_CASE_ =False
SCREAMING_SNAKE_CASE_ =False
SCREAMING_SNAKE_CASE_ =False
SCREAMING_SNAKE_CASE_ =False
SCREAMING_SNAKE_CASE_ =False
SCREAMING_SNAKE_CASE_ =False
SCREAMING_SNAKE_CASE_ =False
def __a ( self : Any ):
'''simple docstring'''
UpperCAmelCase__ : Any = DecisionTransformerModelTester(self )
UpperCAmelCase__ : Union[str, Any] = ConfigTester(self , config_class=snake_case__ , hidden_size=3_7 )
def __a ( self : List[Any] ):
'''simple docstring'''
self.config_tester.run_common_tests()
def __a ( self : int ):
'''simple docstring'''
UpperCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case__ )
@slow
def __a ( self : List[str] ):
'''simple docstring'''
for model_name in DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase__ : Tuple = DecisionTransformerModel.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
def __a ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ : Dict = model_class(snake_case__ )
UpperCAmelCase__ : Union[str, Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase__ : Tuple = [*signature.parameters.keys()]
UpperCAmelCase__ : str = [
"states",
"actions",
"rewards",
"returns_to_go",
"timesteps",
"attention_mask",
]
self.assertListEqual(arg_names[: len(snake_case__ )] , snake_case__ )
@require_torch
class lowerCAmelCase__ ( unittest.TestCase ):
@slow
def __a ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = 2 # number of steps of autoregressive prediction we will perform
UpperCAmelCase__ : Tuple = 1_0 # defined by the RL environment, may be normalized
UpperCAmelCase__ : Optional[Any] = DecisionTransformerModel.from_pretrained("edbeeching/decision-transformer-gym-hopper-expert" )
UpperCAmelCase__ : Any = model.to(snake_case__ )
UpperCAmelCase__ : Optional[int] = model.config
torch.manual_seed(0 )
UpperCAmelCase__ : Optional[int] = torch.randn(1 , 1 , config.state_dim ).to(device=snake_case__ , dtype=torch.floataa ) # env.reset()
UpperCAmelCase__ : Optional[Any] = torch.tensor(
[[0.24_2793, -0.2869_3074, 0.874_2613], [0.6781_5274, -0.0810_1085, -0.1295_2147]] , device=snake_case__ )
UpperCAmelCase__ : List[str] = torch.tensor(snake_case__ , device=snake_case__ , dtype=torch.floataa ).reshape(1 , 1 , 1 )
UpperCAmelCase__ : Union[str, Any] = state
UpperCAmelCase__ : Dict = torch.zeros(1 , 0 , config.act_dim , device=snake_case__ , dtype=torch.floataa )
UpperCAmelCase__ : Any = torch.zeros(1 , 0 , device=snake_case__ , dtype=torch.floataa )
UpperCAmelCase__ : Optional[int] = torch.tensor(0 , device=snake_case__ , dtype=torch.long ).reshape(1 , 1 )
for step in range(snake_case__ ):
UpperCAmelCase__ : List[Any] = torch.cat([actions, torch.zeros(1 , 1 , config.act_dim , device=snake_case__ )] , dim=1 )
UpperCAmelCase__ : Optional[int] = torch.cat([rewards, torch.zeros(1 , 1 , device=snake_case__ )] , dim=1 )
UpperCAmelCase__ : Dict = torch.ones(1 , states.shape[1] ).to(dtype=torch.long , device=states.device )
with torch.no_grad():
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : int = model(
states=snake_case__ , actions=snake_case__ , rewards=snake_case__ , returns_to_go=snake_case__ , timesteps=snake_case__ , attention_mask=snake_case__ , return_dict=snake_case__ , )
self.assertEqual(action_pred.shape , actions.shape )
self.assertTrue(torch.allclose(action_pred[0, -1] , expected_outputs[step] , atol=1e-4 ) )
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : str = ( # env.step(action)
torch.randn(1 , 1 , config.state_dim ).to(device=snake_case__ , dtype=torch.floataa ),
1.0,
False,
{},
)
UpperCAmelCase__ : Union[str, Any] = action_pred[0, -1]
UpperCAmelCase__ : int = torch.cat([states, state] , dim=1 )
UpperCAmelCase__ : Dict = returns_to_go[0, -1] - reward
UpperCAmelCase__ : Optional[Any] = torch.cat([returns_to_go, pred_return.reshape(1 , 1 , 1 )] , dim=1 )
UpperCAmelCase__ : Tuple = torch.cat(
[timesteps, torch.ones((1, 1) , device=snake_case__ , dtype=torch.long ) * (step + 1)] , dim=1 )
"""simple docstring"""
import argparse
import torch
from transformers import (
SpeechTaConfig,
SpeechTaFeatureExtractor,
SpeechTaForSpeechToSpeech,
SpeechTaForSpeechToText,
SpeechTaForTextToSpeech,
SpeechTaProcessor,
SpeechTaTokenizer,
logging,
)
from transformers.tokenization_utils import AddedToken
logging.set_verbosity_info()
_lowerCAmelCase : List[Any] = logging.get_logger("""transformers.models.speecht5""")
_lowerCAmelCase : Any = {
"""speech_encoder_prenet.layer_norm""": """speecht5.encoder.prenet.feature_projection.layer_norm""",
"""speech_encoder_prenet.post_extract_proj""": """speecht5.encoder.prenet.feature_projection.projection""",
"""speech_encoder_prenet.pos_conv.0""": """speecht5.encoder.prenet.pos_conv_embed.conv""",
"""speech_encoder_prenet.mask_emb""": """speecht5.encoder.prenet.masked_spec_embed""",
}
_lowerCAmelCase : List[str] = {
"""text_encoder_prenet.encoder_prenet.0""": """speecht5.encoder.prenet.embed_tokens""",
"""text_encoder_prenet.encoder_prenet.1.alpha""": """speecht5.encoder.prenet.encode_positions.alpha""",
}
_lowerCAmelCase : Dict = {
"""speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0""": """speecht5.decoder.prenet.layers.0""",
"""speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0""": """speecht5.decoder.prenet.layers.1""",
"""speech_decoder_prenet.decoder_prenet.0.1""": """speecht5.decoder.prenet.final_layer""",
"""speech_decoder_prenet.decoder_prenet.1.alpha""": """speecht5.decoder.prenet.encode_positions.alpha""",
"""speech_decoder_prenet.spkembs_layer.0""": """speecht5.decoder.prenet.speaker_embeds_layer""",
}
_lowerCAmelCase : str = {
"""speech_decoder_postnet.feat_out""": """speech_decoder_postnet.feat_out""",
"""speech_decoder_postnet.prob_out""": """speech_decoder_postnet.prob_out""",
"""speech_decoder_postnet.postnet.postnet.0.0""": """speech_decoder_postnet.layers.0.conv""",
"""speech_decoder_postnet.postnet.postnet.0.1""": """speech_decoder_postnet.layers.0.batch_norm""",
"""speech_decoder_postnet.postnet.postnet.1.0""": """speech_decoder_postnet.layers.1.conv""",
"""speech_decoder_postnet.postnet.postnet.1.1""": """speech_decoder_postnet.layers.1.batch_norm""",
"""speech_decoder_postnet.postnet.postnet.2.0""": """speech_decoder_postnet.layers.2.conv""",
"""speech_decoder_postnet.postnet.postnet.2.1""": """speech_decoder_postnet.layers.2.batch_norm""",
"""speech_decoder_postnet.postnet.postnet.3.0""": """speech_decoder_postnet.layers.3.conv""",
"""speech_decoder_postnet.postnet.postnet.3.1""": """speech_decoder_postnet.layers.3.batch_norm""",
"""speech_decoder_postnet.postnet.postnet.4.0""": """speech_decoder_postnet.layers.4.conv""",
"""speech_decoder_postnet.postnet.postnet.4.1""": """speech_decoder_postnet.layers.4.batch_norm""",
}
_lowerCAmelCase : Tuple = {
"""text_decoder_prenet.embed_tokens""": """speecht5.decoder.prenet.embed_tokens""",
}
_lowerCAmelCase : List[str] = {
"""text_decoder_postnet.output_projection""": """text_decoder_postnet.lm_head""",
}
_lowerCAmelCase : Optional[Any] = {
"""encoder.layers.*.self_attn.k_proj""": """speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj""",
"""encoder.layers.*.self_attn.v_proj""": """speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj""",
"""encoder.layers.*.self_attn.q_proj""": """speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj""",
"""encoder.layers.*.self_attn.out_proj""": """speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj""",
"""encoder.layers.*.self_attn_layer_norm""": """speecht5.encoder.wrapped_encoder.layers.*.layer_norm""",
"""encoder.layers.*.fc1""": """speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense""",
"""encoder.layers.*.fc2""": """speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense""",
"""encoder.layers.*.final_layer_norm""": """speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """speecht5.encoder.wrapped_encoder.layer_norm""",
"""encoder.pos_emb.pe_k""": """speecht5.encoder.wrapped_encoder.embed_positions.pe_k""",
}
_lowerCAmelCase : Optional[Any] = {
"""decoder.layers.*.self_attn.k_proj""": """speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj""",
"""decoder.layers.*.self_attn.v_proj""": """speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj""",
"""decoder.layers.*.self_attn.q_proj""": """speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj""",
"""decoder.layers.*.self_attn.out_proj""": """speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj""",
"""decoder.layers.*.self_attn_layer_norm""": """speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm""",
"""decoder.layers.*.encoder_attn.k_proj""": """speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj""",
"""decoder.layers.*.encoder_attn.v_proj""": """speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj""",
"""decoder.layers.*.encoder_attn.q_proj""": """speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj""",
"""decoder.layers.*.encoder_attn.out_proj""": """speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj""",
"""decoder.layers.*.encoder_attn_layer_norm""": """speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm""",
"""decoder.layers.*.fc1""": """speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense""",
"""decoder.layers.*.fc2""": """speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense""",
"""decoder.layers.*.final_layer_norm""": """speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm""",
}
_lowerCAmelCase : Union[str, Any] = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_TEXT_DECODER_PRENET,
**MAPPING_TEXT_DECODER_POSTNET,
}
_lowerCAmelCase : Optional[Any] = {
**MAPPING_TEXT_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
_lowerCAmelCase : Dict = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
_lowerCAmelCase : Any = []
_lowerCAmelCase : Tuple = [
"""encoder.version""",
"""encoder.layers.*.norm_k.weight""",
"""encoder.layers.*.norm_k.bias""",
"""decoder.version""",
"""decoder.layers.*.norm_k.weight""",
"""decoder.layers.*.norm_k.bias""",
"""decoder.pos_emb.pe_k""",
"""speech_encoder_prenet.embed_positions._float_tensor""",
"""text_decoder_prenet.embed_positions._float_tensor""",
]
_lowerCAmelCase : Dict = IGNORE_KEYS + [
"""encoder.proj""",
"""text_encoder_prenet.*""",
"""speech_decoder_prenet.*""",
"""speech_decoder_postnet.*""",
]
_lowerCAmelCase : int = IGNORE_KEYS + [
"""encoder.proj""",
"""speech_encoder_prenet.*""",
"""text_decoder_prenet.*""",
"""text_decoder_postnet.*""",
]
_lowerCAmelCase : int = IGNORE_KEYS + [
"""encoder.proj""",
"""text_encoder_prenet.*""",
"""text_decoder_prenet.*""",
"""text_decoder_postnet.*""",
]
def SCREAMING_SNAKE_CASE__ ( snake_case : Any , snake_case : Optional[Any] , snake_case : str , snake_case : Any , snake_case : Optional[int] )-> Dict:
'''simple docstring'''
for attribute in key.split("." ):
UpperCAmelCase__ : List[str] = getattr(snake_case , snake_case )
if weight_type is not None:
UpperCAmelCase__ : Any = getattr(snake_case , snake_case ).shape
else:
UpperCAmelCase__ : List[str] = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
f' {value.shape} for {full_name}' )
if weight_type == "weight":
UpperCAmelCase__ : Dict = value
elif weight_type == "weight_g":
UpperCAmelCase__ : Union[str, Any] = value
elif weight_type == "weight_v":
UpperCAmelCase__ : List[str] = value
elif weight_type == "bias":
UpperCAmelCase__ : Union[str, Any] = value
elif weight_type == "running_mean":
UpperCAmelCase__ : Union[str, Any] = value
elif weight_type == "running_var":
UpperCAmelCase__ : List[Any] = value
elif weight_type == "num_batches_tracked":
UpperCAmelCase__ : List[Any] = value
else:
UpperCAmelCase__ : Union[str, Any] = value
logger.info(f'{key + ("." + weight_type if weight_type is not None else "")} was initialized from {full_name}.' )
def SCREAMING_SNAKE_CASE__ ( snake_case : List[Any] , snake_case : Tuple )-> Tuple:
'''simple docstring'''
for key in ignore_keys:
if key.endswith(".*" ):
if name.startswith(key[:-1] ):
return True
elif ".*." in key:
UpperCAmelCase__ , UpperCAmelCase__ : str = key.split(".*." )
if prefix in name and suffix in name:
return True
elif key in name:
return True
return False
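# Example: the IGNORE_KEYS entry "encoder.layers.*.norm_k.weight" contains ".*.", so a checkpoint
# key like "encoder.layers.3.norm_k.weight" matches the prefix/suffix branch and is skipped.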
def SCREAMING_SNAKE_CASE__ ( snake_case : Dict , snake_case : Union[str, Any] , snake_case : Optional[Any] )-> List[str]:
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = []
if task == "s2t":
UpperCAmelCase__ : List[Any] = hf_model.speechta.encoder.prenet.feature_encoder
UpperCAmelCase__ : Optional[int] = MAPPING_S2T
UpperCAmelCase__ : Optional[int] = IGNORE_KEYS_S2T
elif task == "t2s":
UpperCAmelCase__ : str = None
UpperCAmelCase__ : Union[str, Any] = MAPPING_T2S
UpperCAmelCase__ : Union[str, Any] = IGNORE_KEYS_T2S
elif task == "s2s":
UpperCAmelCase__ : Tuple = hf_model.speechta.encoder.prenet.feature_encoder
UpperCAmelCase__ : str = MAPPING_S2S
UpperCAmelCase__ : int = IGNORE_KEYS_S2S
else:
raise ValueError(f'Unsupported task: {task}' )
for name, value in fairseq_dict.items():
if should_ignore(snake_case , snake_case ):
logger.info(f'{name} was ignored' )
continue
UpperCAmelCase__ : Dict = False
if "conv_layers" in name:
load_conv_layer(
snake_case , snake_case , snake_case , snake_case , hf_model.config.feat_extract_norm == "group" , )
UpperCAmelCase__ : Optional[Any] = True
else:
for key, mapped_key in MAPPING.items():
# mapped_key = "speecht5." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if "*" in key:
UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = key.split(".*." )
if prefix in name and suffix in name:
UpperCAmelCase__ : Optional[int] = suffix
# if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
if key in name:
UpperCAmelCase__ : Optional[int] = True
if "*" in mapped_key:
UpperCAmelCase__ : str = name.split(snake_case )[0].split("." )[-2]
UpperCAmelCase__ : str = mapped_key.replace("*" , snake_case )
if "weight_g" in name:
UpperCAmelCase__ : Optional[int] = "weight_g"
elif "weight_v" in name:
UpperCAmelCase__ : Optional[Any] = "weight_v"
elif "bias" in name:
UpperCAmelCase__ : Any = "bias"
elif "weight" in name:
UpperCAmelCase__ : List[Any] = "weight"
elif "running_mean" in name:
UpperCAmelCase__ : Optional[int] = "running_mean"
elif "running_var" in name:
UpperCAmelCase__ : List[Any] = "running_var"
elif "num_batches_tracked" in name:
UpperCAmelCase__ : Optional[int] = "num_batches_tracked"
else:
UpperCAmelCase__ : List[Any] = None
set_recursively(snake_case , snake_case , snake_case , snake_case , snake_case )
continue
if not is_used:
unused_weights.append(snake_case )
logger.warning(f'Unused weights: {unused_weights}' )
def SCREAMING_SNAKE_CASE__ ( snake_case : Optional[int] , snake_case : Any , snake_case : List[Any] , snake_case : Any , snake_case : str )-> str:
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = full_name.split("conv_layers." )[-1]
UpperCAmelCase__ : Dict = name.split("." )
UpperCAmelCase__ : Union[str, Any] = int(items[0] )
UpperCAmelCase__ : List[Any] = int(items[1] )
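# e.g. a full_name ending in "conv_layers.0.0.weight" parses to layer_id=0 and type_id=0 (a conv
# weight/bias); type_id == 2 addresses the layer-norm parameters handled below.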
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.' )
UpperCAmelCase__ : Dict = value
logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.' )
UpperCAmelCase__ : Dict = value
logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.' )
UpperCAmelCase__ : str = value
logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.' )
UpperCAmelCase__ : List[str] = value
logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
else:
unused_weights.append(snake_case )
@torch.no_grad()
def SCREAMING_SNAKE_CASE__ ( snake_case : str , snake_case : Dict , snake_case : int , snake_case : Union[str, Any]=None , snake_case : Dict=None , snake_case : Tuple=None , )-> Optional[Any]:
'''simple docstring'''
if config_path is not None:
UpperCAmelCase__ : Union[str, Any] = SpeechTaConfig.from_pretrained(snake_case )
else:
UpperCAmelCase__ : List[Any] = SpeechTaConfig()
if task == "s2t":
UpperCAmelCase__ : List[Any] = config.max_text_positions
UpperCAmelCase__ : str = SpeechTaForSpeechToText(snake_case )
elif task == "t2s":
UpperCAmelCase__ : Tuple = 1876
UpperCAmelCase__ : Union[str, Any] = 600
UpperCAmelCase__ : Any = config.max_speech_positions
UpperCAmelCase__ : Optional[int] = SpeechTaForTextToSpeech(snake_case )
elif task == "s2s":
UpperCAmelCase__ : List[str] = 1876
UpperCAmelCase__ : Dict = config.max_speech_positions
UpperCAmelCase__ : Union[str, Any] = SpeechTaForSpeechToSpeech(snake_case )
else:
raise ValueError(f'Unknown task name: {task}' )
if vocab_path:
UpperCAmelCase__ : int = SpeechTaTokenizer(snake_case , model_max_length=config.max_text_positions )
# Mask token behaves like a normal word, i.e. include the space before it
UpperCAmelCase__ : List[Any] = AddedToken("<mask>" , lstrip=snake_case , rstrip=snake_case )
UpperCAmelCase__ : Union[str, Any] = mask_token
tokenizer.add_special_tokens({"mask_token": mask_token} )
tokenizer.add_tokens(["<ctc_blank>"] )
UpperCAmelCase__ : Optional[int] = SpeechTaFeatureExtractor()
UpperCAmelCase__ : int = SpeechTaProcessor(tokenizer=snake_case , feature_extractor=snake_case )
processor.save_pretrained(snake_case )
UpperCAmelCase__ : Any = torch.load(snake_case )
recursively_load_weights(fairseq_checkpoint["model"] , snake_case , snake_case )
model.save_pretrained(snake_case )
if repo_id:
print("Pushing to the hub..." )
processor.push_to_hub(snake_case )
model.push_to_hub(snake_case )
if __name__ == "__main__":
_lowerCAmelCase : Any = argparse.ArgumentParser()
parser.add_argument(
"""--task""",
default="""s2t""",
type=str,
help="""Type of the SpeechT5 model you'd like to convert. Should be one of 's2t', 't2s', 's2s'.""",
)
parser.add_argument("""--checkpoint_path""", required=True, default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--vocab_path""", default=None, type=str, help="""Path to SentencePiece model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, default=None, type=str, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
_lowerCAmelCase : int = parser.parse_args()
convert_speechta_checkpoint(
args.task,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.vocab_path,
args.push_to_hub,
)
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_lowerCAmelCase : Tuple = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : Dict = ["""MLukeTokenizer"""]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mluke import MLukeTokenizer
else:
import sys
_lowerCAmelCase : List[str] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
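# Illustrative use of the lazily exported tokenizer (assumes sentencepiece is installed; the
# checkpoint name is an example):
#   from transformers import MLukeTokenizer
#   tokenizer = MLukeTokenizer.from_pretrained("studio-ousia/mluke-base")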
"""simple docstring"""
import argparse
import os
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
_lowerCAmelCase : Optional[Any] = """src/transformers"""
_lowerCAmelCase : List[str] = """docs/source/en/tasks"""
def SCREAMING_SNAKE_CASE__ ( snake_case : int , snake_case : Tuple , snake_case : str )-> Any:
'''simple docstring'''
with open(snake_case , "r" , encoding="utf-8" , newline="\n" ) as f:
UpperCAmelCase__ : Tuple = f.readlines()
# Find the start prompt.
UpperCAmelCase__ : Dict = 0
while not lines[start_index].startswith(snake_case ):
start_index += 1
start_index += 1
UpperCAmelCase__ : List[str] = start_index
while not lines[end_index].startswith(snake_case ):
end_index += 1
end_index -= 1
while len(lines[start_index] ) <= 1:
start_index += 1
while len(lines[end_index] ) <= 1:
end_index -= 1
end_index += 1
return "".join(lines[start_index:end_index] ), start_index, end_index, lines
# This is to make sure the transformers module imported is the one in the repo.
_lowerCAmelCase : Any = direct_transformers_import(TRANSFORMERS_PATH)
_lowerCAmelCase : Optional[Any] = {
"""asr.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
"""audio_classification.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
"""language_modeling.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
"""image_classification.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
"""masked_language_modeling.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
"""multiple_choice.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
"""object_detection.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
"""question_answering.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
"""semantic_segmentation.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
"""sequence_classification.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
"""summarization.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
"""token_classification.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
"""translation.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
"""video_classification.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
"""document_question_answering.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
"""monocular_depth_estimation.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}
# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
_lowerCAmelCase : Optional[int] = {
"""summarization.md""": ("""nllb""",),
"""translation.md""": ("""nllb""",),
}
def SCREAMING_SNAKE_CASE__ ( snake_case : Optional[Any] )-> Tuple:
'''simple docstring'''
UpperCAmelCase__ : Dict = TASK_GUIDE_TO_MODELS[task_guide]
UpperCAmelCase__ : Union[str, Any] = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(snake_case , set() )
UpperCAmelCase__ : str = {
code: name
for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
if (code in model_maping_names or code in special_model_types)
}
return ", ".join([f'[{name}](../model_doc/{code})' for code, name in model_names.items()] ) + "\n"
def SCREAMING_SNAKE_CASE__ ( snake_case : Any , snake_case : List[Any]=False )-> List[Any]:
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : List[str] = _find_text_in_file(
filename=os.path.join(snake_case , snake_case ) , start_prompt="<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->" , end_prompt="<!--End of the generated tip-->" , )
UpperCAmelCase__ : Optional[int] = get_model_list_for_task(snake_case )
if current_list != new_list:
if overwrite:
with open(os.path.join(snake_case , snake_case ) , "w" , encoding="utf-8" , newline="\n" ) as f:
f.writelines(lines[:start_index] + [new_list] + lines[end_index:] )
else:
raise ValueError(
f'The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`'
" to fix this." )
if __name__ == "__main__":
_lowerCAmelCase : int = argparse.ArgumentParser()
parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""")
_lowerCAmelCase : Union[str, Any] = parser.parse_args()
for task_guide in TASK_GUIDE_TO_MODELS.keys():
check_model_list_for_task(task_guide, args.fix_and_overwrite)
"""simple docstring"""
import os
import re
import sys
import traceback
import warnings
from pathlib import Path
from typing import Dict, Optional, Union
from uuid import uuida
from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami
from huggingface_hub.file_download import REGEX_COMMIT_HASH
from huggingface_hub.utils import (
EntryNotFoundError,
RepositoryNotFoundError,
RevisionNotFoundError,
is_jinja_available,
)
from packaging import version
from requests import HTTPError
from .. import __version__
from .constants import (
DEPRECATED_REVISION_ARGS,
DIFFUSERS_CACHE,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
SAFETENSORS_WEIGHTS_NAME,
WEIGHTS_NAME,
)
from .import_utils import (
ENV_VARS_TRUE_VALUES,
_flax_version,
_jax_version,
_onnxruntime_version,
_torch_version,
is_flax_available,
is_onnx_available,
is_torch_available,
)
from .logging import get_logger
_lowerCAmelCase : Optional[int] = get_logger(__name__)
_lowerCAmelCase : Any = Path(__file__).parent / """model_card_template.md"""
_lowerCAmelCase : Dict = uuida().hex
_lowerCAmelCase : Optional[int] = os.getenv("""HF_HUB_OFFLINE""", """""").upper() in ENV_VARS_TRUE_VALUES
_lowerCAmelCase : Optional[int] = os.getenv("""DISABLE_TELEMETRY""", """""").upper() in ENV_VARS_TRUE_VALUES
_lowerCAmelCase : int = HUGGINGFACE_CO_RESOLVE_ENDPOINT + """/api/telemetry/"""
def SCREAMING_SNAKE_CASE__ ( snake_case : Union[Dict, str, None] = None )-> str:
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = f'diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}'
if DISABLE_TELEMETRY or HF_HUB_OFFLINE:
return ua + "; telemetry/off"
if is_torch_available():
ua += f'; torch/{_torch_version}'
if is_flax_available():
ua += f'; jax/{_jax_version}'
ua += f'; flax/{_flax_version}'
if is_onnx_available():
ua += f'; onnxruntime/{_onnxruntime_version}'
# CI will set this value to True
if os.environ.get("DIFFUSERS_IS_CI" , "" ).upper() in ENV_VARS_TRUE_VALUES:
ua += "; is_ci/true"
if isinstance(snake_case , snake_case ):
ua += "; " + "; ".join(f'{k}/{v}' for k, v in user_agent.items() )
elif isinstance(snake_case , snake_case ):
ua += "; " + user_agent
return ua
def SCREAMING_SNAKE_CASE__ ( snake_case : str , snake_case : Optional[str] = None , snake_case : Optional[str] = None )-> List[str]:
'''simple docstring'''
if token is None:
UpperCAmelCase__ : Optional[Any] = HfFolder.get_token()
if organization is None:
UpperCAmelCase__ : Tuple = whoami(snake_case )["name"]
return f'{username}/{model_id}'
else:
return f'{organization}/{model_id}'
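# e.g. get_full_repo_name("my-model", organization="my-org") -> "my-org/my-model"; without an
# organization, the username of the authenticated token is used.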
def SCREAMING_SNAKE_CASE__ ( snake_case : int , snake_case : List[Any] )-> List[Any]:
'''simple docstring'''
if not is_jinja_available():
raise ValueError(
"Modelcard rendering is based on Jinja templates."
" Please make sure to have `jinja` installed before using `create_model_card`."
" To install it, please run `pip install Jinja2`." )
if hasattr(snake_case , "local_rank" ) and args.local_rank not in [-1, 0]:
return
UpperCAmelCase__ : int = args.hub_token if hasattr(snake_case , "hub_token" ) else None
UpperCAmelCase__ : Optional[Any] = get_full_repo_name(snake_case , token=snake_case )
UpperCAmelCase__ : Tuple = ModelCard.from_template(
card_data=ModelCardData( # Card metadata object that will be converted to YAML block
language="en" , license="apache-2.0" , library_name="diffusers" , tags=[] , datasets=args.dataset_name , metrics=[] , ) , template_path=snake_case , model_name=snake_case , repo_name=snake_case , dataset_name=args.dataset_name if hasattr(snake_case , "dataset_name" ) else None , learning_rate=args.learning_rate , train_batch_size=args.train_batch_size , eval_batch_size=args.eval_batch_size , gradient_accumulation_steps=(
args.gradient_accumulation_steps if hasattr(snake_case , "gradient_accumulation_steps" ) else None
) , adam_beta1=args.adam_beta1 if hasattr(snake_case , "adam_beta1" ) else None , adam_beta2=args.adam_beta2 if hasattr(snake_case , "adam_beta2" ) else None , adam_weight_decay=args.adam_weight_decay if hasattr(snake_case , "adam_weight_decay" ) else None , adam_epsilon=args.adam_epsilon if hasattr(snake_case , "adam_epsilon" ) else None , lr_scheduler=args.lr_scheduler if hasattr(snake_case , "lr_scheduler" ) else None , lr_warmup_steps=args.lr_warmup_steps if hasattr(snake_case , "lr_warmup_steps" ) else None , ema_inv_gamma=args.ema_inv_gamma if hasattr(snake_case , "ema_inv_gamma" ) else None , ema_power=args.ema_power if hasattr(snake_case , "ema_power" ) else None , ema_max_decay=args.ema_max_decay if hasattr(snake_case , "ema_max_decay" ) else None , mixed_precision=args.mixed_precision , )
UpperCAmelCase__ : List[str] = os.path.join(args.output_dir , "README.md" )
model_card.save(snake_case )
def SCREAMING_SNAKE_CASE__ ( snake_case : Optional[str] , snake_case : Optional[str] = None )-> Tuple:
'''simple docstring'''
if resolved_file is None or commit_hash is not None:
return commit_hash
UpperCAmelCase__ : Dict = str(Path(snake_case ).as_posix() )
UpperCAmelCase__ : Optional[int] = re.search(r"snapshots/([^/]+)/" , snake_case )
if search is None:
return None
UpperCAmelCase__ : Dict = search.groups()[0]
return commit_hash if REGEX_COMMIT_HASH.match(snake_case ) else None
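# e.g. a resolved file under ".../snapshots/<commit-hash>/config.json" yields "<commit-hash>",
# provided it matches the commit-hash regex; otherwise None is returned.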
# Old default cache path, potentially to be migrated.
# This logic was more or less taken from `transformers`, with the following differences:
# - Diffusers doesn't use custom environment variables to specify the cache path.
# - There is no need to migrate the cache format, just move the files to the new location.
_lowerCAmelCase : Dict = os.path.expanduser(
os.getenv("""HF_HOME""", os.path.join(os.getenv("""XDG_CACHE_HOME""", """~/.cache"""), """huggingface"""))
)
_lowerCAmelCase : List[Any] = os.path.join(hf_cache_home, """diffusers""")
def SCREAMING_SNAKE_CASE__ ( snake_case : Optional[str] = None , snake_case : Optional[str] = None )-> None:
'''simple docstring'''
if new_cache_dir is None:
UpperCAmelCase__ : Union[str, Any] = DIFFUSERS_CACHE
if old_cache_dir is None:
UpperCAmelCase__ : str = old_diffusers_cache
UpperCAmelCase__ : List[str] = Path(snake_case ).expanduser()
UpperCAmelCase__ : Any = Path(snake_case ).expanduser()
for old_blob_path in old_cache_dir.glob("**/blobs/*" ):
if old_blob_path.is_file() and not old_blob_path.is_symlink():
UpperCAmelCase__ : Dict = new_cache_dir / old_blob_path.relative_to(snake_case )
new_blob_path.parent.mkdir(parents=snake_case , exist_ok=snake_case )
os.replace(snake_case , snake_case )
try:
os.symlink(snake_case , snake_case )
except OSError:
logger.warning(
"Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded." )
# At this point, old_cache_dir contains symlinks to the new cache (it can still be used).
_lowerCAmelCase : Tuple = os.path.join(DIFFUSERS_CACHE, """version_diffusers_cache.txt""")
if not os.path.isfile(cache_version_file):
_lowerCAmelCase : Any = 0
else:
with open(cache_version_file) as f:
try:
_lowerCAmelCase : List[str] = int(f.read())
except ValueError:
_lowerCAmelCase : Optional[int] = 0
if cache_version < 1:
_lowerCAmelCase : List[str] = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0
if old_cache_is_not_empty:
logger.warning(
"""The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your """
"""existing cached models. This is a one-time operation, you can interrupt it or run it """
"""later by calling `diffusers.utils.hub_utils.move_cache()`."""
)
try:
move_cache()
except Exception as e:
_lowerCAmelCase : Dict = """\n""".join(traceback.format_tb(e.__traceback__))
logger.error(
F"""There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease """
"""file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole """
"""message and we will do our best to help."""
)
if cache_version < 1:
try:
os.makedirs(DIFFUSERS_CACHE, exist_ok=True)
with open(cache_version_file, """w""") as f:
f.write("""1""")
except Exception:
logger.warning(
F"""There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure """
"""the directory exists and can be written to."""
)
def SCREAMING_SNAKE_CASE__ ( snake_case : str , snake_case : Optional[str] = None )-> str:
'''simple docstring'''
if variant is not None:
UpperCAmelCase__ : int = weights_name.split("." )
UpperCAmelCase__ : Optional[Any] = splits[:-1] + [variant] + splits[-1:]
UpperCAmelCase__ : Optional[int] = ".".join(snake_case )
return weights_name
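# e.g. _add_variant("diffusion_pytorch_model.bin", "fp16") -> "diffusion_pytorch_model.fp16.bin";
# with variant=None the name is returned unchanged.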
def SCREAMING_SNAKE_CASE__ ( snake_case : Tuple , *,
snake_case : Union[str, Any] , snake_case : Optional[Any] , snake_case : str , snake_case : List[str] , snake_case : Dict , snake_case : Any , snake_case : Any , snake_case : Tuple , snake_case : List[str] , snake_case : Any , snake_case : Optional[int]=None , )-> Tuple:
'''simple docstring'''
    pretrained_model_name_or_path = str(pretrained_model_name_or_path)
    if os.path.isfile(pretrained_model_name_or_path):
        return pretrained_model_name_or_path
    elif os.path.isdir(pretrained_model_name_or_path):
        if os.path.isfile(os.path.join(pretrained_model_name_or_path, weights_name)):
            # Load from a PyTorch checkpoint
            model_file = os.path.join(pretrained_model_name_or_path, weights_name)
            return model_file
        elif subfolder is not None and os.path.isfile(
            os.path.join(pretrained_model_name_or_path, subfolder, weights_name)
        ):
            model_file = os.path.join(pretrained_model_name_or_path, subfolder, weights_name)
            return model_file
        else:
            raise EnvironmentError(
                f"Error: no file named {weights_name} found in directory {pretrained_model_name_or_path}."
            )
    else:
        # 1. First check if deprecated way of loading from branches is used
        if (
            revision in DEPRECATED_REVISION_ARGS
            and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME)
            and version.parse(version.parse(__version__).base_version) >= version.parse("0.20.0")
        ):
            try:
                model_file = hf_hub_download(
                    pretrained_model_name_or_path,
                    filename=_add_variant(weights_name, revision),
                    cache_dir=cache_dir,
                    force_download=force_download,
                    proxies=proxies,
                    resume_download=resume_download,
                    local_files_only=local_files_only,
                    use_auth_token=use_auth_token,
                    user_agent=user_agent,
                    subfolder=subfolder,
                    revision=revision or commit_hash,
                )
                warnings.warn(
                    f"Loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'`"
                    f" is deprecated. Loading instead from `revision='main'` with `variant={revision}`. Loading model"
                    f" variants via `revision='{revision}'` will be removed in diffusers v1. Please use"
                    f" `variant='{revision}'` instead.",
                    FutureWarning,
                )
                return model_file
            except:  # noqa: E722
                warnings.warn(
                    f"You are loading the variant {revision} from {pretrained_model_name_or_path} via"
                    f" `revision='{revision}'`. This behavior is deprecated and will be removed in diffusers v1. One"
                    f" should use `variant='{revision}'` instead. However, it appears that"
                    f" {pretrained_model_name_or_path} currently does not have a"
                    f" {_add_variant(weights_name, revision)} file in the 'main' branch of"
                    f" {pretrained_model_name_or_path}.\nThe Diffusers team and community would be very grateful if"
                    " you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title"
                    f" '{pretrained_model_name_or_path} is missing {_add_variant(weights_name, revision)}' so that"
                    " the correct variant file can be added.",
                    FutureWarning,
                )
        try:
            # 2. Load model file as usual
            model_file = hf_hub_download(
                pretrained_model_name_or_path,
                filename=weights_name,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
                use_auth_token=use_auth_token,
                user_agent=user_agent,
                subfolder=subfolder,
                revision=revision or commit_hash,
            )
            return model_file

        except RepositoryNotFoundError:
            raise EnvironmentError(
                f"{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier "
                "listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a "
                "token having permission to this repo with `use_auth_token` or log in with `huggingface-cli "
                "login`."
            )
        except RevisionNotFoundError:
            raise EnvironmentError(
                f"{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for "
                "this model name. Check the model page at "
                f"'https://huggingface.co/{pretrained_model_name_or_path}' for available revisions."
            )
        except EntryNotFoundError:
            raise EnvironmentError(
                f"{pretrained_model_name_or_path} does not appear to have a file named {weights_name}."
            )
        except HTTPError as err:
            raise EnvironmentError(
                f"There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}"
            )
        except ValueError:
            raise EnvironmentError(
                f"We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load this model, couldn't find it"
                f" in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a"
                f" directory containing a file named {weights_name}.\nCheck out your internet connection or see how"
                " to run the library in offline mode at"
                " 'https://huggingface.co/docs/diffusers/installation#offline-mode'."
            )
        except EnvironmentError:
            raise EnvironmentError(
                f"Can't load the model for '{pretrained_model_name_or_path}'. If you were trying to load it from "
                "'https://huggingface.co/models', make sure you don't have a local directory with the same name. "
                f"Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory "
                f"containing a file named {weights_name}."
            )
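# A standalone sketch of the filename arithmetic assumed by `_add_variant`
# above (illustrative only; the real helper is defined elsewhere in the
# library): a variant tag is spliced in before the file extension, so
# "diffusion_pytorch_model.bin" + "fp16" -> "diffusion_pytorch_model.fp16.bin".
def _add_variant_sketch(weights_name, variant=None):
    if variant is None:
        return weights_name
    parts = weights_name.split(".")
    return ".".join(parts[:-1] + [variant, parts[-1]])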
"""simple docstring"""
# Usage:
# ./gen-card-facebook-wmt19.py
import os
from pathlib import Path
def SCREAMING_SNAKE_CASE__ ( snake_case : Dict , snake_case : Dict , snake_case : List[str] )-> int:
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = {
"en": "Machine learning is great, isn't it?",
"ru": "Машинное обучение - это здорово, не так ли?",
"de": "Maschinelles Lernen ist großartig, oder?",
}
# BLUE scores as follows:
# "pair": [fairseq, transformers]
UpperCAmelCase__ : Optional[Any] = {
"ru-en": ["[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)", "39.20"],
"en-ru": ["[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)", "33.47"],
"en-de": ["[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)", "42.83"],
"de-en": ["[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)", "41.35"],
}
UpperCAmelCase__ : int = f'{src_lang}-{tgt_lang}'
UpperCAmelCase__ : List[Any] = f'\n---\nlanguage: \n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt19\n- facebook\nlicense: apache-2.0\ndatasets:\n- wmt19\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.\n\nFor more details, please see, [Facebook FAIR\'s WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).\n\nThe abbreviation FSMT stands for FairSeqMachineTranslation\n\nAll four models are available:\n\n* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)\n* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)\n* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)\n* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = "facebook/wmt19-{src_lang}-{tgt_lang}"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = "{texts[src_lang]}"\ninput_ids = tokenizer.encode(input, return_tensors="pt")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n- The original (and this ported model) doesn\'t seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)\n\n## Training data\n\nPretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).\n\n## Eval results\n\npair | fairseq | transformers\n-------|---------|----------\n{pair} | {scores[pair][0]} | {scores[pair][1]}\n\nThe score is slightly below the score reported by `fairseq`, since `transformers`` currently doesn\'t support:\n- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).\n- re-ranking\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=15\nmkdir -p $DATA_DIR\nsacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\nnote: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt19/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)\n\n\n### BibTeX entry and citation info\n\n```bibtex\n@inproceedings{{...,\n year={{2020}},\n title={{Facebook FAIR\'s WMT19 News Translation Task Submission}},\n author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},\n booktitle={{Proc. of WMT}},\n}}\n```\n\n\n## TODO\n\n- port model ensemble (fairseq uses 4 model checkpoints)\n\n'
os.makedirs(snake_case , exist_ok=snake_case )
UpperCAmelCase__ : List[Any] = os.path.join(snake_case , "README.md" )
print(f'Generating {path}' )
with open(snake_case , "w" , encoding="utf-8" ) as f:
f.write(snake_case )
# make sure we are under the root of the project
_lowerCAmelCase : int = Path(__file__).resolve().parent.parent.parent
_lowerCAmelCase : Any = repo_dir / """model_cards"""
for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Tuple = model_name.split("""-""")
_lowerCAmelCase : Dict = model_cards_dir / """facebook""" / model_name
write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
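# Worked example of the split above (illustrative): "wmt19-ru-en".split("-")
# yields ["wmt19", "ru", "en"], so src_lang="ru" and tgt_lang="en", and the
# card is written to model_cards/facebook/wmt19-ru-en/README.md.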
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import tensorflow as tf
from transformers import AutoTokenizer, TFAutoModelForSeqaSeqLM
@require_tf
@require_sentencepiece
@require_tokenizers
class TFMTaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        model = TFAutoModelForSeqaSeqLM.from_pretrained("google/mt5-small")
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="tf").input_ids
        labels = tokenizer("Hi I am", return_tensors="tf").input_ids

        loss = model(input_ids, labels=labels).loss
        mtf_score = -tf.math.reduce_mean(loss).numpy()

        EXPECTED_SCORE = -21.228168
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 2e-4)
"""simple docstring"""
def SCREAMING_SNAKE_CASE__ ( snake_case : list[list] )-> list[list]:
'''simple docstring'''
UpperCAmelCase__ : List[Any] = current_set.copy()
for row_index, row in enumerate(snake_case ):
UpperCAmelCase__ : Optional[int] = row[0]
for column_index, column in enumerate(snake_case ):
if magnitude == 0:
UpperCAmelCase__ : int = column
continue
UpperCAmelCase__ : List[str] = column / magnitude
# Subtract to cancel term
UpperCAmelCase__ : Dict = current_set[0]
UpperCAmelCase__ : int = [first_row]
UpperCAmelCase__ : Dict = current_set[1::]
for row in current_set:
UpperCAmelCase__ : str = []
# If first term is 0, it is already in form we want, so we preserve it
if row[0] == 0:
final_set.append(snake_case )
continue
for column_index in range(len(snake_case ) ):
temp_row.append(first_row[column_index] - row[column_index] )
final_set.append(snake_case )
# Create next recursion iteration set
if len(final_set[0] ) != 3:
UpperCAmelCase__ : Optional[int] = final_set[0]
UpperCAmelCase__ : Optional[int] = []
UpperCAmelCase__ : List[str] = []
for row in final_set[1::]:
current_first_column.append(row[0] )
next_iteration.append(row[1::] )
UpperCAmelCase__ : List[str] = simplify(snake_case )
for i in range(len(snake_case ) ):
resultant[i].insert(0 , current_first_column[i] )
resultant.insert(0 , snake_case )
UpperCAmelCase__ : Optional[Any] = resultant
return final_set
def SCREAMING_SNAKE_CASE__ ( snake_case : list[list] )-> list:
'''simple docstring'''
if len(snake_case ) == 0:
raise IndexError("solve_simultaneous() requires n lists of length n+1" )
UpperCAmelCase__ : Optional[int] = len(snake_case ) + 1
if any(len(snake_case ) != _length for item in equations ):
raise IndexError("solve_simultaneous() requires n lists of length n+1" )
for row in equations:
if any(not isinstance(snake_case , (int, float) ) for column in row ):
raise ValueError("solve_simultaneous() requires lists of integers" )
if len(snake_case ) == 1:
return [equations[0][-1] / equations[0][0]]
UpperCAmelCase__ : Optional[Any] = equations.copy()
if any(0 in row for row in data_set ):
UpperCAmelCase__ : Any = data_set.copy()
UpperCAmelCase__ : Union[str, Any] = []
for row_index, row in enumerate(snake_case ):
if 0 not in row:
UpperCAmelCase__ : Optional[Any] = data_set.pop(snake_case )
break
if not full_row:
raise ValueError("solve_simultaneous() requires at least 1 full equation" )
data_set.insert(0 , snake_case )
UpperCAmelCase__ : Union[str, Any] = data_set.copy()
UpperCAmelCase__ : Tuple = simplify(snake_case )
UpperCAmelCase__ : Optional[Any] = simplified[::-1]
UpperCAmelCase__ : list = []
for row in simplified:
UpperCAmelCase__ : List[str] = row[-1]
if not solutions:
if row[-2] == 0:
solutions.append(0 )
continue
solutions.append(current_solution / row[-2] )
continue
UpperCAmelCase__ : Tuple = row.copy()[: len(snake_case ) - 1 :]
while temp_row[0] == 0:
temp_row.pop(0 )
if len(snake_case ) == 0:
solutions.append(0 )
continue
UpperCAmelCase__ : List[str] = temp_row[1::]
UpperCAmelCase__ : Union[str, Any] = temp_row[::-1]
for column_index, column in enumerate(snake_case ):
current_solution -= column * solutions[column_index]
solutions.append(snake_case )
UpperCAmelCase__ : List[Any] = []
for item in solutions:
final.append(float(round(snake_case , 5 ) ) )
return final[::-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
    eq = [
[2, 1, 1, 1, 1, 4],
[1, 2, 1, 1, 1, 5],
[1, 1, 2, 1, 1, 6],
[1, 1, 1, 2, 1, 7],
[1, 1, 1, 1, 2, 8],
]
print(solve_simultaneous(eq))
print(solve_simultaneous([[4, 2]]))
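    # Independent cross-check of the 5-variable example with numpy (an
    # assumption: numpy is installed; the solver above does not require it).
    # The exact solution of `eq` is [-1.0, 0.0, 1.0, 2.0, 3.0].
    import numpy as np

    coefficients = np.array([row[:-1] for row in eq], dtype=float)
    constants = np.array([row[-1] for row in eq], dtype=float)
    print(np.linalg.solve(coefficients, constants))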
"""simple docstring"""
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class AlbertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        embedding_size=16,
        hidden_size=36,
        num_hidden_layers=6,
        num_hidden_groups=6,
        num_attention_heads=6,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return AlbertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            num_hidden_groups=self.num_hidden_groups,
        )
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            sentence_order_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.sop_logits.shape, (self.batch_size, config.num_labels))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = AlbertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = AlbertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = AlbertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class AlbertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            AlbertModel,
            AlbertForPreTraining,
            AlbertForMaskedLM,
            AlbertForMultipleChoice,
            AlbertForSequenceClassification,
            AlbertForTokenClassification,
            AlbertForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": AlbertModel,
            "fill-mask": AlbertForMaskedLM,
            "question-answering": AlbertForQuestionAnswering,
            "text-classification": AlbertForSequenceClassification,
            "token-classification": AlbertForTokenClassification,
            "zero-shot": AlbertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True

    # special case for ForPreTraining model
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["sentence_order_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = AlbertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=AlbertConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = AlbertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class AlbertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = AlbertModel.from_pretrained("albert-base-v2")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]]
        )

        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
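# The slice comparison above pins rows/columns 1..3 of the final hidden
# states; atol=1e-4 absorbs kernel-level nondeterminism while still catching
# weight or configuration regressions.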
"""simple docstring"""
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPTaLMHeadModel
logger = logging.getLogger(__name__)


def save_model(model, dirpath):
    # save results
    if os.path.exists(dirpath):
        if os.path.exists(os.path.join(dirpath, "config.json")) and os.path.isfile(
            os.path.join(dirpath, "config.json")
        ):
            os.remove(os.path.join(dirpath, "config.json"))
        if os.path.exists(os.path.join(dirpath, "pytorch_model.bin")) and os.path.isfile(
            os.path.join(dirpath, "pytorch_model.bin")
        ):
            os.remove(os.path.join(dirpath, "pytorch_model.bin"))
    else:
        os.makedirs(dirpath)
    model.save_pretrained(dirpath)
def entropy(p, unlogit=False):
    """Compute the entropy of a probability distribution along its last axis."""
    exponent = 2
    if unlogit:
        p = torch.pow(p, exponent)
    plogp = p * torch.log(p)
    plogp[p == 0] = 0
    return -plogp.sum(dim=-1)
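# Quick sanity check for `entropy` (illustrative): a uniform distribution over
# four outcomes has entropy ln(4) ≈ 1.3863 nats, i.e.
#   entropy(torch.tensor([0.25, 0.25, 0.25, 0.25]))  ->  tensor(1.3863)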
def print_ad_tensor(tensor):
    """Print a 2D tensor, one logged row of values per layer."""
    logger.info("lv, h >\t" + "\t".join(f"{x + 1}" for x in range(len(tensor))))
    for row in range(len(tensor)):
        if tensor.dtype != torch.long:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:.5f}" for x in tensor[row].cpu().data))
        else:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:d}" for x in tensor[row].cpu().data))
def compute_heads_importance(
    args, model, eval_dataloader, compute_entropy=True, compute_importance=True, head_mask=None, actually_pruned=False
):
    """Compute head attention entropy and importance scores as in Michel et al. (http://arxiv.org/abs/1905.10650)."""
    # Prepare our tensors
    n_layers, n_heads = model.config.num_hidden_layers, model.config.num_attention_heads
    head_importance = torch.zeros(n_layers, n_heads).to(args.device)
    attn_entropy = torch.zeros(n_layers, n_heads).to(args.device)

    if head_mask is None:
        head_mask = torch.ones(n_layers, n_heads).to(args.device)
    head_mask.requires_grad_(requires_grad=True)
    # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
    if actually_pruned:
        head_mask = None

    tot_tokens = 0.0
    total_loss = 0.0
    for step, inputs in enumerate(tqdm(eval_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])):
        inputs = tuple(t.to(args.device) for t in inputs)
        (input_ids,) = inputs

        # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
        outputs = model(input_ids, labels=input_ids, head_mask=head_mask)
        # (loss), lm_logits, presents, (all hidden_states), (attentions)
        loss, _, all_attentions = (
            outputs[0],
            outputs[1],
            outputs[-1],
        )  # Loss and logits are the first, attention the last
        loss.backward()  # Backpropagate to populate the gradients in the head mask
        total_loss += loss.detach().cpu().numpy()
        if compute_entropy:
            for layer, attn in enumerate(all_attentions):
                masked_entropy = entropy(attn.detach(), True)
                attn_entropy[layer] += masked_entropy.sum(-1).sum(0).sum(0).detach()

        if compute_importance:
            head_importance += head_mask.grad.abs().detach()
        tot_tokens += torch.ones_like(input_ids).float().detach().sum().data

    # Normalize
    attn_entropy /= tot_tokens
    head_importance /= tot_tokens
    # Layerwise importance normalization
    if not args.dont_normalize_importance_by_layer:
        exponent = 2
        norm_by_layer = torch.pow(torch.pow(head_importance, exponent).sum(-1), 1 / exponent)
        head_importance /= norm_by_layer.unsqueeze(-1) + 1e-20

    if not args.dont_normalize_global_importance:
        head_importance = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())

    # Print matrices
    if compute_entropy:
        logger.info("Attention entropies")
        print_ad_tensor(attn_entropy)
    if compute_importance:
        logger.info("Head importance scores")
        print_ad_tensor(head_importance)
    logger.info("Head ranked by importance scores")
    head_ranks = torch.zeros(head_importance.numel(), dtype=torch.long, device=args.device)
    head_ranks[head_importance.view(-1).sort(descending=True)[1]] = torch.arange(
        head_importance.numel(), device=args.device
    )
    head_ranks = head_ranks.view_as(head_importance)
    print_ad_tensor(head_ranks)

    return attn_entropy, head_importance, total_loss
def mask_heads(args, model, eval_dataloader):
    """Find the mask of heads to prune based on importance scores, as described in Michel et al. (http://arxiv.org/abs/1905.10650)."""
    _, head_importance, loss = compute_heads_importance(args, model, eval_dataloader, compute_entropy=False)
    original_score = 1 / loss  # instead of downstream score use the LM loss
    logger.info("Pruning: original score: %f, threshold: %f", original_score, original_score * args.masking_threshold)

    new_head_mask = torch.ones_like(head_importance)
    num_to_mask = max(1, int(new_head_mask.numel() * args.masking_amount))

    current_score = original_score
    while current_score >= original_score * args.masking_threshold:
        head_mask = new_head_mask.clone().detach()  # save current head mask
        # heads from least important to most - keep only not-masked heads
        head_importance[head_mask == 0.0] = float("Inf")
        current_heads_to_mask = head_importance.view(-1).sort()[1]

        if len(current_heads_to_mask) <= num_to_mask:
            print("BREAK BY num_to_mask")
            break

        # mask heads
        current_heads_to_mask = current_heads_to_mask[:num_to_mask]
        logger.info("Heads to mask: %s", str(current_heads_to_mask.tolist()))
        new_head_mask = new_head_mask.view(-1)
        new_head_mask[current_heads_to_mask] = 0.0
        new_head_mask = new_head_mask.view_as(head_mask)
        new_head_mask = new_head_mask.clone().detach()
        print_ad_tensor(new_head_mask)

        # Compute metric and head importance again
        _, head_importance, loss = compute_heads_importance(
            args, model, eval_dataloader, compute_entropy=False, head_mask=new_head_mask
        )
        current_score = 1 / loss
        logger.info(
            "Masking: current score: %f, remaining heads %d (%.1f percents)",
            current_score,
            new_head_mask.sum(),
            new_head_mask.sum() / new_head_mask.numel() * 100,
        )

    logger.info("Final head mask")
    print_ad_tensor(head_mask)
    np.save(os.path.join(args.output_dir, "head_mask.npy"), head_mask.detach().cpu().numpy())

    return head_mask
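# Illustration of the stopping rule above (hypothetical numbers): with
# masking_threshold=0.9 and original_score=0.05, masking keeps going while the
# 1/loss score stays >= 0.045; `head_mask` holds the last mask that satisfied
# the threshold, which is why it (not `new_head_mask`) is saved and returned.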
def prune_heads(args, model, eval_dataloader, head_mask):
    """Prune heads (actually remove their weights) based on the head mask, and measure the speed/quality effect."""
    # Try pruning and test time speedup
    # Pruning is like masking but we actually remove the masked weights
    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args, model, eval_dataloader, compute_entropy=False, compute_importance=False, head_mask=head_mask
    )
    score_masking = 1 / loss
    original_time = datetime.now() - before_time

    original_num_params = sum(p.numel() for p in model.parameters())
    heads_to_prune = {
        layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(head_mask))
    }

    for k, v in heads_to_prune.items():
        if isinstance(v, int):
            heads_to_prune[k] = [
                v,
            ]

    assert sum(len(h) for h in heads_to_prune.values()) == (1 - head_mask.long()).sum().item()
    model.prune_heads(heads_to_prune)

    pruned_num_params = sum(p.numel() for p in model.parameters())

    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args,
        model,
        eval_dataloader,
        compute_entropy=False,
        compute_importance=False,
        head_mask=None,
        actually_pruned=True,
    )
    score_pruning = 1 / loss
    new_time = datetime.now() - before_time

    logger.info(
        "Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)",
        original_num_params,
        pruned_num_params,
        pruned_num_params / original_num_params * 100,
    )
    logger.info("Pruning: score with masking: %f score with pruning: %f", score_masking, score_pruning)
    logger.info("Pruning: speed ratio (original timing / new timing): %f percents", original_time / new_time * 100)
    save_model(model, args.output_dir)
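# Shape of the mapping passed to `model.prune_heads` above (a sketch with
# made-up indices): {0: [0, 2], 5: [1]} removes heads 0 and 2 of layer 0 and
# head 1 of layer 5; the parameter count shrinks accordingly, which is what
# the original/pruned `numel()` comparison in the log line measures.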
def main():
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--data_dir",
        default=None,
        type=str,
        required=True,
        help="The input data dir. Should contain the .tsv files (or other data files) for the task.",
    )
    parser.add_argument(
        "--model_name_or_path",
        default=None,
        type=str,
        required=True,
        help="Path to pretrained model or model identifier from huggingface.co/models",
    )
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help="The output directory where the model predictions and checkpoints will be written.",
    )

    # Other parameters
    parser.add_argument(
        "--config_name",
        default="",
        type=str,
        help="Pretrained config name or path if not the same as model_name_or_path",
    )
    parser.add_argument(
        "--tokenizer_name",
        default="",
        type=str,
        help="Pretrained tokenizer name or path if not the same as model_name_or_path",
    )
    parser.add_argument(
        "--cache_dir",
        default=None,
        type=str,
        help="Where do you want to store the pre-trained models downloaded from s3",
    )
    parser.add_argument(
        "--data_subset", type=int, default=-1, help="If > 0: limit the data to a subset of data_subset instances."
    )
    parser.add_argument(
        "--overwrite_output_dir", action="store_true", help="Whether to overwrite data in output directory"
    )
    parser.add_argument(
        "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
    )
    parser.add_argument(
        "--dont_normalize_importance_by_layer", action="store_true", help="Don't normalize importance score by layers"
    )
    parser.add_argument(
        "--dont_normalize_global_importance",
        action="store_true",
        help="Don't normalize all importance scores between 0 and 1",
    )
    parser.add_argument(
        "--try_masking", action="store_true", help="Whether to try to mask head until a threshold of accuracy."
    )
    parser.add_argument(
        "--masking_threshold",
        default=0.9,
        type=float,
        help="masking threshold in term of metrics (stop masking when metric < threshold * original metric value).",
    )
    parser.add_argument(
        "--masking_amount", default=0.1, type=float, help="Amount of heads to mask at each masking step."
    )
    parser.add_argument("--metric_name", default="acc", type=str, help="Metric to use for head masking.")
    parser.add_argument(
        "--max_seq_length",
        default=128,
        type=int,
        help=(
            "The maximum total input sequence length after WordPiece tokenization. \n"
            "Sequences longer than this will be truncated, sequences shorter padded."
        ),
    )
    parser.add_argument("--batch_size", default=1, type=int, help="Batch size.")
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--local_rank", type=int, default=-1, help="local_rank for distributed training on gpus")
    parser.add_argument("--no_cuda", action="store_true", help="Whether not to use CUDA when available")
    parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging.")
    parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.")
    args = parser.parse_args()

    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()

    # Setup devices and distributed training
    if args.local_rank == -1 or args.no_cuda:
        args.device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank)
        args.device = torch.device("cuda", args.local_rank)
        args.n_gpu = 1
        torch.distributed.init_process_group(backend="nccl")  # Initializes the distributed backend

    # Setup logging
    logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
    logger.info("device: {} n_gpu: {}, distributed: {}".format(args.device, args.n_gpu, bool(args.local_rank != -1)))

    model = GPTaLMHeadModel.from_pretrained(args.model_name_or_path)

    # Distributed and parallel training
    model.to(args.device)
    if args.local_rank != -1:
        model = nn.parallel.DistributedDataParallel(
            model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True
        )
    elif args.n_gpu > 1:
        model = nn.DataParallel(model)

    # Print/save training arguments
    os.makedirs(args.output_dir, exist_ok=True)
    torch.save(args, os.path.join(args.output_dir, "run_args.bin"))
    logger.info("Training/evaluation parameters %s", args)

    # Prepare dataset
    numpy_data = np.concatenate(
        [
            np.loadtxt(args.data_dir, dtype=np.int64),
        ]
    )
    train_tensor_dataset = (torch.from_numpy(numpy_data),)
    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    eval_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.batch_size)

    # Compute head entropy and importance score
    compute_heads_importance(args, model, eval_dataloader)

    # Try head masking (set heads to zero until the score goes under a threshold)
    # and head pruning (remove masked heads and see the effect on the network)
    if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
        head_mask = mask_heads(args, model, eval_dataloader)
        prune_heads(args, model, eval_dataloader, head_mask)


if __name__ == "__main__":
    main()
"""simple docstring"""
def SCREAMING_SNAKE_CASE__ ( snake_case : Optional[Any] , snake_case : Any )-> Any:
'''simple docstring'''
UpperCAmelCase__ : List[str] = [1]
for i in range(2 , snake_case ):
factorials.append(factorials[-1] * i )
assert 0 <= k < factorials[-1] * n, "k out of bounds"
UpperCAmelCase__ : Union[str, Any] = []
UpperCAmelCase__ : str = list(range(snake_case ) )
# Find permutation
while factorials:
UpperCAmelCase__ : str = factorials.pop()
UpperCAmelCase__ , UpperCAmelCase__ : int = divmod(snake_case , snake_case )
permutation.append(elements[number] )
elements.remove(elements[number] )
permutation.append(elements[0] )
return permutation
if __name__ == "__main__":
import doctest
doctest.testmod()
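    # Worked example (illustrative): for n = 4 the permutations of
    # [0, 1, 2, 3] are indexed lexicographically by k, so k = 1 selects the
    # second one.
    print(kth_permutation(1, 4))  # [0, 1, 3, 2]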
"""simple docstring"""
from ...utils import logging
from ..ta.modeling_tf_ta import TFTaEncoderModel, TFTaForConditionalGeneration, TFTaModel
from .configuration_mta import MTaConfig
logger = logging.get_logger(__name__)

_CONFIG_FOR_DOC = "T5Config"


class TFMTaModel(TFTaModel):
    model_type = "mt5"
    config_class = MTaConfig


class TFMTaForConditionalGeneration(TFTaForConditionalGeneration):
    model_type = "mt5"
    config_class = MTaConfig


class TFMTaEncoderModel(TFTaEncoderModel):
    model_type = "mt5"
    config_class = MTaConfig
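# Example usage (a sketch; requires TensorFlow and network access):
#   from transformers import AutoTokenizer
#   tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")
#   model = TFMTaModel.from_pretrained("google/mt5-small")
# The three wrappers above only swap in the mT5 config; everything else
# mirrors the T5 API they inherit from.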
"""simple docstring"""
import unittest
import numpy as np
import requests
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
    is_torch_greater_or_equal_than_1_11 = False
if is_vision_available():
from PIL import Image
from transformers import PixaStructImageProcessor
class PixaStructImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        size=None,
        do_normalize=True,
        do_convert_rgb=True,
        patch_size=None,
    ):
        size = size if size is not None else {"height": 20, "width": 20}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.size = size
        self.do_normalize = do_normalize
        self.do_convert_rgb = do_convert_rgb
        self.max_patches = [512, 1024, 2048, 4096]
        self.patch_size = patch_size if patch_size is not None else {"height": 16, "width": 16}

    def prepare_image_processor_dict(self):
        return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}

    def prepare_dummy_image(self):
        img_url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg"
        raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")
        return raw_image
@unittest.skipIf(
    not is_torch_greater_or_equal_than_1_11,
    reason="`Pix2StructImageProcessor` requires `torch>=1.11.0`.",
)
@require_torch
@require_vision
class PixaStructImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = PixaStructImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = PixaStructImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_convert_rgb"))

    def test_expected_patches(self):
        dummy_image = self.image_processor_tester.prepare_dummy_image()
        image_processor = self.image_processing_class(**self.image_processor_dict)
        max_patch = 2048
        inputs = image_processor(dummy_image, return_tensors="pt", max_patches=max_patch)
        self.assertTrue(torch.allclose(inputs.flattened_patches.mean(), torch.tensor(0.0606), atol=1e-3, rtol=1e-3))

    def test_call_pil(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (1, max_patch, expected_hidden_dim),
            )

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )

    def test_call_vqa(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        image_processor.is_vqa = True
        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input: VQA mode requires header text
            with self.assertRaises(ValueError):
                encoded_images = image_processor(
                    image_inputs[0], return_tensors="pt", max_patches=max_patch
                ).flattened_patches

            dummy_text = "Hello"
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch, header_text=dummy_text
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (1, max_patch, expected_hidden_dim),
            )

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch, header_text=dummy_text
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )

    def test_call_numpy(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (1, max_patch, expected_hidden_dim),
            )

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )

    def test_call_pytorch(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (1, max_patch, expected_hidden_dim),
            )

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )
@unittest.skipIf(
    not is_torch_greater_or_equal_than_1_11,
    reason="`Pix2StructImageProcessor` requires `torch>=1.11.0`.",
)
@require_torch
@require_vision
class PixaStructImageProcessingTestFourChannels(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = PixaStructImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = PixaStructImageProcessingTester(self, num_channels=4)
        self.expected_encoded_image_num_channels = 3

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_convert_rgb"))

    def test_call_pil_four_channels(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input: the 4-channel inputs are converted to RGB,
        # hence (num_channels - 1) effective channels per flattened patch.
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * (self.image_processor_tester.num_channels - 1)
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (1, max_patch, expected_hidden_dim),
            )

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )
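# Note on `expected_hidden_dim` used throughout these tests (illustrative
# arithmetic): each flattened patch carries height * width * channels pixel
# values plus 2 leading patch coordinates, e.g. 16 * 16 * 3 + 2 = 770 for the
# default RGB setting.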
"""simple docstring"""
import os
try:
from .build_directory_md import good_file_paths
except ImportError:
from build_directory_md import good_file_paths # type: ignore
filepaths = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"

upper_files = [file for file in filepaths if file != file.lower()]
if upper_files:
    print(f"{len(upper_files)} files contain uppercase characters:")
    print("\n".join(upper_files) + "\n")

space_files = [file for file in filepaths if " " in file]
if space_files:
    print(f"{len(space_files)} files contain space characters:")
    print("\n".join(space_files) + "\n")

hyphen_files = [file for file in filepaths if "-" in file]
if hyphen_files:
    print(f"{len(hyphen_files)} files contain hyphen characters:")
    print("\n".join(hyphen_files) + "\n")

nodir_files = [file for file in filepaths if os.sep not in file]
if nodir_files:
    print(f"{len(nodir_files)} files are not in a directory:")
    print("\n".join(nodir_files) + "\n")

bad_files = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
    import sys

    sys.exit(bad_files)
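# Example outcome (hypothetical): a file named "Sorts/Bubble Sort.py" would be
# reported by both the uppercase and the space checks, and the script would
# exit with a nonzero status equal to the total number of offending entries.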
"""simple docstring"""
import importlib
import os
import fsspec
import pytest
from fsspec import register_implementation
from fsspec.registry import _registry as _fsspec_registry
from datasets.filesystems import COMPRESSION_FILESYSTEMS, HfFileSystem, extract_path_from_uri, is_remote_filesystem
from .utils import require_lza, require_zstandard
def test_mockfs(mockfs):
    assert "mock" in _fsspec_registry
    assert "bz2" in _fsspec_registry


def test_non_mockfs():
    assert "mock" not in _fsspec_registry
    assert "bz2" in _fsspec_registry


def test_extract_path_from_uri():
    mock_bucket = "mock-s3-bucket"
    dataset_path = f"s3://{mock_bucket}"
    dataset_path = extract_path_from_uri(dataset_path)
    assert dataset_path.startswith("s3://") is False

    dataset_path = "./local/path"
    new_dataset_path = extract_path_from_uri(dataset_path)
    assert dataset_path == new_dataset_path


def test_is_remote_filesystem(mockfs):
    is_remote = is_remote_filesystem(mockfs)
    assert is_remote is True

    fs = fsspec.filesystem("file")
    is_remote = is_remote_filesystem(fs)
    assert is_remote is False


@pytest.mark.parametrize("compression_fs_class", COMPRESSION_FILESYSTEMS)
def test_compression_filesystems(compression_fs_class, gz_file, bza_file, lza_file, zstd_file, xz_file, text_file):
    input_paths = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_file, "bz2": bza_file, "lz4": lza_file}
    input_path = input_paths[compression_fs_class.protocol]
    if input_path is None:
        reason = f"for '{compression_fs_class.protocol}' compression protocol, "
        if compression_fs_class.protocol == "lz4":
            reason += require_lza.kwargs["reason"]
        elif compression_fs_class.protocol == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason)
    fs = fsspec.filesystem(compression_fs_class.protocol, fo=input_path)
    assert isinstance(fs, compression_fs_class)
    expected_filename = os.path.basename(input_path)
    expected_filename = expected_filename[: expected_filename.rindex(".")]
    assert fs.glob("*") == [expected_filename]
    with fs.open(expected_filename, "r", encoding="utf-8") as f, open(text_file, encoding="utf-8") as expected_file:
        assert f.read() == expected_file.read()
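# How fsspec chained URLs resolve in the next test (a sketch with hypothetical
# paths): "zip://dataset.jsonl::archive.zip" opens the member `dataset.jsonl`
# inside `archive.zip`, i.e. the `protocol://member::outer` shape.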
@pytest.mark.parametrize("protocol" , ["zip", "gzip"] )
def SCREAMING_SNAKE_CASE__ ( snake_case : Tuple , snake_case : Dict , snake_case : Tuple )-> Optional[Any]:
'''simple docstring'''
UpperCAmelCase__ : List[str] = {"zip": zip_jsonl_path, "gzip": jsonl_gz_path}
UpperCAmelCase__ : int = compressed_file_paths[protocol]
UpperCAmelCase__ : Any = "dataset.jsonl"
UpperCAmelCase__ : Any = f'{protocol}://{member_file_path}::{compressed_file_path}'
UpperCAmelCase__ , *UpperCAmelCase__ : Optional[int] = fsspec.get_fs_token_paths(snake_case )
assert fs.isfile(snake_case )
assert not fs.isfile("non_existing_" + member_file_path )
@pytest.mark.integration
def SCREAMING_SNAKE_CASE__ ( snake_case : List[str] , snake_case : Dict , snake_case : Dict , snake_case : Dict )-> str:
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = hf_api.dataset_info(snake_case , token=snake_case )
UpperCAmelCase__ : str = HfFileSystem(repo_info=snake_case , token=snake_case )
assert sorted(hffs.glob("*" ) ) == [".gitattributes", "data"]
assert hffs.isdir("data" )
assert hffs.isfile(".gitattributes" ) and hffs.isfile("data/text_data.txt" )
with open(snake_case ) as f:
assert hffs.open("data/text_data.txt" , "r" ).read() == f.read()
def SCREAMING_SNAKE_CASE__ ( )-> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase__ : Tuple = "bz2"
# Import module
import datasets.filesystems
# Overwrite protocol and reload
register_implementation(snake_case , snake_case , clobber=snake_case )
with pytest.warns(snake_case ) as warning_info:
importlib.reload(datasets.filesystems )
assert len(snake_case ) == 1
assert (
str(warning_info[0].message )
== f'A filesystem protocol was already set for {protocol} and will be overwritten.'
)
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
_lowerCAmelCase : Tuple = logging.get_logger(__name__)
_lowerCAmelCase : Union[str, Any] = {
"""shi-labs/dinat-mini-in1k-224""": """https://huggingface.co/shi-labs/dinat-mini-in1k-224/resolve/main/config.json""",
# See all Dinat models at https://huggingface.co/models?filter=dinat
}
class DinatConfig(BackboneConfigMixin , PretrainedConfig ):
    model_type = '''dinat'''
    attribute_map = {
        '''num_attention_heads''': '''num_heads''',
        '''num_hidden_layers''': '''num_layers''',
    }
    def __init__( self , patch_size=4 , num_channels=3 , embed_dim=64 , depths=[3, 4, 6, 5] , num_heads=[2, 4, 8, 16] , kernel_size=7 , dilations=[[1, 8, 1], [1, 4, 1, 4], [1, 2, 1, 2, 1, 2], [1, 1, 1, 1, 1]] , mlp_ratio=3.0 , qkv_bias=True , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , drop_path_rate=0.1 , hidden_act="gelu" , initializer_range=0.02 , layer_norm_eps=1e-5 , layer_scale_init_value=0.0 , out_features=None , out_indices=None , **kwargs , ):
        '''simple docstring'''
        super().__init__(**kwargs )
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths )
        self.num_heads = num_heads
        self.kernel_size = kernel_size
        self.dilations = dilations
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Dinat work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths ) - 1) )
        self.layer_scale_init_value = layer_scale_init_value
        self.stage_names = ["stem"] + [f'stage{idx}' for idx in range(1 , len(depths ) + 1 )]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features , out_indices=out_indices , stage_names=self.stage_names )
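# Hedged note on the hidden_size computation above: hierarchical vision
# backbones double the channel dimension at every stage, so with the default
# embed_dim=64 and 4 stages the last stage has 64 * 2 ** (4 - 1) = 512
# channels, which is what VisionEncoderDecoderModel reads back as
# config.hidden_size.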
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase : List[Any] = logging.get_logger(__name__)
_lowerCAmelCase : List[Any] = {
# See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}
class MegatronBertConfig(PretrainedConfig ):
    model_type = '''megatron-bert'''
    def __init__( self , vocab_size=29056 , hidden_size=1024 , num_hidden_layers=24 , num_attention_heads=16 , intermediate_size=4096 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , pad_token_id=0 , position_embedding_type="absolute" , use_cache=True , **kwargs , ):
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
"""simple docstring"""
import os
from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE, hf_cache_home
default_cache_path = HUGGINGFACE_HUB_CACHE
CONFIG_NAME = """config.json"""
WEIGHTS_NAME = """diffusion_pytorch_model.bin"""
FLAX_WEIGHTS_NAME = """diffusion_flax_model.msgpack"""
ONNX_WEIGHTS_NAME = """model.onnx"""
SAFETENSORS_WEIGHTS_NAME = """diffusion_pytorch_model.safetensors"""
ONNX_EXTERNAL_WEIGHTS_NAME = """weights.pb"""
HUGGINGFACE_CO_RESOLVE_ENDPOINT = """https://huggingface.co"""
DIFFUSERS_CACHE = default_cache_path
DIFFUSERS_DYNAMIC_MODULE_NAME = """diffusers_modules"""
HF_MODULES_CACHE = os.getenv("""HF_MODULES_CACHE""", os.path.join(hf_cache_home, """modules"""))
DEPRECATED_REVISION_ARGS = ["""fp16""", """non-ema"""]
TEXT_ENCODER_ATTN_MODULE = """.self_attn"""
"""simple docstring"""
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import datasets
import datasets.config
from .utils import require_beam
class DummyBeamDataset(datasets.BeamBasedBuilder ):
    def _info(self ):
        '''simple docstring'''
        return datasets.DatasetInfo(
            features=datasets.Features({"content": datasets.Value("string" )} ) , supervised_keys=None , )
    def _split_generators(self , dl_manager , pipeline ):
        '''simple docstring'''
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"examples": get_test_dummy_examples()} )]
    def _build_pcollection(self , pipeline , examples ):
        '''simple docstring'''
        import apache_beam as beam
        return pipeline | "Load Examples" >> beam.Create(examples )
class NestedBeamDataset(datasets.BeamBasedBuilder ):
    def _info(self ):
        '''simple docstring'''
        return datasets.DatasetInfo(
            features=datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string" )} )} ) , supervised_keys=None , )
    def _split_generators(self , dl_manager , pipeline ):
        '''simple docstring'''
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"examples": get_test_nested_examples()} )
        ]
    def _build_pcollection(self , pipeline , examples ):
        '''simple docstring'''
        import apache_beam as beam
        return pipeline | "Load Examples" >> beam.Create(examples )
def get_test_dummy_examples():
'''simple docstring'''
return [(i, {"content": content}) for i, content in enumerate(["foo", "bar", "foobar"] )]
def get_test_nested_examples():
'''simple docstring'''
return [(i, {"a": {"b": [content]}}) for i, content in enumerate(["foo", "bar", "foobar"] )]
class BeamBuilderTest(TestCase ):
    @require_beam
    def test_download_and_prepare(self ):
        '''simple docstring'''
        expected_num_examples = len(get_test_dummy_examples() )
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir , beam_runner="DirectRunner" )
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir , builder.name , "default" , "0.0.0" , f'{builder.name}-train.arrow' ) ) )
            self.assertDictEqual(builder.info.features , datasets.Features({"content": datasets.Value("string" )} ) )
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows , expected_num_examples )
            self.assertEqual(dset["train"].info.splits["train"].num_examples , expected_num_examples )
            self.assertDictEqual(dset["train"][0] , get_test_dummy_examples()[0][1] )
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1] , get_test_dummy_examples()[expected_num_examples - 1][1] )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir , builder.name , "default" , "0.0.0" , "dataset_info.json" ) ) )
            del dset
    @require_beam
    def test_download_and_prepare_sharded(self ):
        '''simple docstring'''
        import apache_beam as beam
        original_write_parquet = beam.io.parquetio.WriteToParquet
        expected_num_examples = len(get_test_dummy_examples() )
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir , beam_runner="DirectRunner" )
            with patch("apache_beam.io.parquetio.WriteToParquet" ) as write_parquet_mock:
                write_parquet_mock.side_effect = partial(original_write_parquet , num_shards=2 )
                builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir , builder.name , "default" , "0.0.0" , f'{builder.name}-train-00000-of-00002.arrow' ) ) )
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir , builder.name , "default" , "0.0.0" , f'{builder.name}-train-00001-of-00002.arrow' ) ) )
            self.assertDictEqual(builder.info.features , datasets.Features({"content": datasets.Value("string" )} ) )
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows , expected_num_examples )
            self.assertEqual(dset["train"].info.splits["train"].num_examples , expected_num_examples )
            # Order is not preserved when sharding, so we just check that all the elements are there
            self.assertListEqual(sorted(dset["train"]["content"] ) , sorted(["foo", "bar", "foobar"] ) )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir , builder.name , "default" , "0.0.0" , "dataset_info.json" ) ) )
            del dset
    @require_beam
    def test_no_beam_options(self ):
        '''simple docstring'''
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir )
            self.assertRaises(datasets.builder.MissingBeamOptions , builder.download_and_prepare )
    @require_beam
    def test_nested_features(self ):
        '''simple docstring'''
        expected_num_examples = len(get_test_nested_examples() )
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = NestedBeamDataset(cache_dir=tmp_cache_dir , beam_runner="DirectRunner" )
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir , builder.name , "default" , "0.0.0" , f'{builder.name}-train.arrow' ) ) )
            self.assertDictEqual(
                builder.info.features , datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string" )} )} ) )
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows , expected_num_examples )
            self.assertEqual(dset["train"].info.splits["train"].num_examples , expected_num_examples )
            self.assertDictEqual(dset["train"][0] , get_test_nested_examples()[0][1] )
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1] , get_test_nested_examples()[expected_num_examples - 1][1] )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir , builder.name , "default" , "0.0.0" , "dataset_info.json" ) ) )
            del dset
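# Hedged note on the tests above: a BeamBasedBuilder materializes its examples
# through an Apache Beam pipeline, so download_and_prepare() needs a runner.
# "DirectRunner" executes the pipeline in-process (handy for tests); omitting
# beam_runner raises MissingBeamOptions, which test_no_beam_options asserts.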
"""simple docstring"""
import importlib
import os
import sys
# This is required to make the module import works (when the python process is running from the root of the repo)
sys.path.append(""".""")
def get_module_path(test_file ):
    '''simple docstring'''
    components = test_file.split(os.path.sep )
    if components[0:2] != ["tests", "models"]:
        raise ValueError(
            "`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got "
            f'{test_file} instead.' )
    test_fn = components[-1]
    if not test_fn.endswith("py" ):
        raise ValueError(f'`test_file` should be a python file. Got {test_fn} instead.' )
    if not test_fn.startswith("test_modeling_" ):
        raise ValueError(
            f'`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead.' )
    components = components[:-1] + [test_fn.replace(".py" , "" )]
    test_module_path = ".".join(components )
    return test_module_path
def get_test_module(test_file ):
    '''simple docstring'''
    test_module_path = get_module_path(test_file )
    test_module = importlib.import_module(test_module_path )
    return test_module
def get_tester_classes(test_file ):
    '''simple docstring'''
    tester_classes = []
    test_module = get_test_module(test_file )
    for attr in dir(test_module ):
        if attr.endswith("ModelTester" ):
            tester_classes.append(getattr(test_module , attr ) )
    # sort with class names
    return sorted(tester_classes , key=lambda x: x.__name__ )
def get_test_classes(test_file ):
    '''simple docstring'''
    test_classes = []
    test_module = get_test_module(test_file )
    for attr in dir(test_module ):
        attr_value = getattr(test_module , attr )
        # (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking
        # `all_model_classes` is not empty (which also excludes other special classes).
        model_classes = getattr(attr_value , "all_model_classes" , [] )
        if len(model_classes ) > 0:
            test_classes.append(attr_value )
    # sort with class names
    return sorted(test_classes , key=lambda x: x.__name__ )
def get_model_classes(test_file ):
    '''simple docstring'''
    test_classes = get_test_classes(test_file )
    model_classes = set()
    for test_class in test_classes:
        model_classes.update(test_class.all_model_classes )
    # sort with class names
    return sorted(model_classes , key=lambda x: x.__name__ )
def get_model_tester_from_test_class(test_class ):
    '''simple docstring'''
    test = test_class()
    if hasattr(test , "setUp" ):
        test.setUp()
    model_tester = None
    if hasattr(test , "model_tester" ):
        # `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case.
        if test.model_tester is not None:
            model_tester = test.model_tester.__class__
    return model_tester
def get_test_classes_for_model(test_file , model_class ):
    '''simple docstring'''
    test_classes = get_test_classes(test_file )
    target_test_classes = []
    for test_class in test_classes:
        if model_class in test_class.all_model_classes:
            target_test_classes.append(test_class )
    # sort with class names
    return sorted(target_test_classes , key=lambda x: x.__name__ )
def get_tester_classes_for_model(test_file , model_class ):
    '''simple docstring'''
    test_classes = get_test_classes_for_model(test_file , model_class )
    tester_classes = []
    for test_class in test_classes:
        tester_class = get_model_tester_from_test_class(test_class )
        if tester_class is not None:
            tester_classes.append(tester_class )
    # sort with class names
    return sorted(tester_classes , key=lambda x: x.__name__ )
def get_test_to_tester_mapping(test_file ):
    '''simple docstring'''
    test_classes = get_test_classes(test_file )
    test_tester_mapping = {test_class: get_model_tester_from_test_class(test_class ) for test_class in test_classes}
    return test_tester_mapping
def get_model_to_test_mapping(test_file ):
    '''simple docstring'''
    model_classes = get_model_classes(test_file )
    model_test_mapping = {
        model_class: get_test_classes_for_model(test_file , model_class ) for model_class in model_classes
    }
    return model_test_mapping
def get_model_to_tester_mapping(test_file ):
    '''simple docstring'''
    model_classes = get_model_classes(test_file )
    model_to_tester_mapping = {
        model_class: get_tester_classes_for_model(test_file , model_class ) for model_class in model_classes
    }
    return model_to_tester_mapping
def to_json(o ):
    '''simple docstring'''
    if isinstance(o , str ):
        return o
    elif isinstance(o , type ):
        return o.__name__
    elif isinstance(o , (list, tuple) ):
        return [to_json(x ) for x in o]
    elif isinstance(o , dict ):
        return {to_json(k ): to_json(v ) for k, v in o.items()}
    else:
        return o
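# Hedged usage sketch for the helpers above (the test-file path is
# hypothetical; the script must run from the repository root, which is why
# sys.path is extended at the top of this module):
#
#   test_file = os.path.join("tests", "models", "bert", "test_modeling_bert.py")
#   get_test_classes(test_file)                       # test classes that define all_model_classes
#   get_model_classes(test_file)                      # model classes covered by those tests
#   to_json(get_model_to_tester_mapping(test_file))   # JSON-friendly class-name mapping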
"""simple docstring"""
import json
import os
import unittest
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class lowerCAmelCase__ ( __magic_name__ , unittest.TestCase ):
    tokenizer_class = XLMTokenizer
    test_rust_tokenizer = False
    def setUp(self ):
        '''simple docstring'''
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"w</w>",
"r</w>",
"t</w>",
"lo",
"low",
"er</w>",
"low</w>",
"lowest</w>",
"newer</w>",
"wider</w>",
"<unk>",
]
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
        with open(self.vocab_file , "w" ) as fp:
            fp.write(json.dumps(vocab_tokens ) )
        with open(self.merges_file , "w" ) as fp:
            fp.write("\n".join(merges ) )
    def get_input_output_texts(self , tokenizer ):
        '''simple docstring'''
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer(self ):
        '''simple docstring'''
        tokenizer = XLMTokenizer(self.vocab_file , self.merges_file )
        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text )
        self.assertListEqual(tokens , bpe_tokens )
        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_bpe_tokens )
    @slow
    def test_sequence_builders(self ):
        '''simple docstring'''
        tokenizer = XLMTokenizer.from_pretrained("xlm-mlm-en-2048" )
        text = tokenizer.encode("sequence builders" , add_special_tokens=False )
        text_a = tokenizer.encode("multi-sequence build" , add_special_tokens=False )
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_a )
assert encoded_sentence == [0] + text + [1]
assert encoded_pair == [0] + text + [1] + text_a + [1]
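# Hedged worked example of the BPE setup above: with the merge rules
# "l o", "lo w" and "e r</w>", tokenizing "lower" starts from single
# characters, merges l+o -> "lo", lo+w -> "low" and e+r</w> -> "er</w>",
# leaving ["low", "er</w>"] -- exactly what test_full_tokenizer asserts.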
"""simple docstring"""
import math
def jump_search(arr: list , x: int )-> int:
    '''simple docstring'''
    n = len(arr )
    step = int(math.floor(math.sqrt(n ) ) )
    prev = 0
    while arr[min(step , n ) - 1] < x:
        prev = step
        step += int(math.floor(math.sqrt(n ) ) )
        if prev >= n:
            return -1
    while arr[prev] < x:
        prev = prev + 1
        if prev == min(step , n ):
            return -1
    if arr[prev] == x:
        return prev
    return -1
if __name__ == "__main__":
    user_input = input("""Enter numbers separated by a comma:\n""").strip()
    arr = [int(item) for item in user_input.split(""",""")]
    x = int(input("""Enter the number to be searched:\n"""))
    res = jump_search(arr, x)
if res == -1:
print("""Number not found!""")
else:
print(F"""Number {x} is at index {res}""")
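# Hedged complexity note: jump search probes a sorted list in blocks of size
# floor(sqrt(n)) and then scans linearly inside the last block, giving
# O(sqrt(n)) comparisons.  Worked example (using jump_search defined above):
#   jump_search([0, 1, 3, 5, 8, 13, 21, 34], 13)  ->  5
#   jump_search([0, 1, 3, 5, 8, 13, 21, 34], 4)   -> -1  (not present)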
"""simple docstring"""
import sys
from typing import Tuple
import numpy as np
import torch
from PIL import Image
from torch import nn
from transformers.image_utils import PILImageResampling
from utils import img_tensorize
class ResizeShortestEdge:
    def __init__(self , short_edge_length , max_size=sys.maxsize ):
        '''simple docstring'''
        self.interp_method = "bilinear"
        self.max_size = max_size
        self.short_edge_length = short_edge_length
    def __call__(self , imgs ):
        '''simple docstring'''
        img_augs = []
        for img in imgs:
            h, w = img.shape[:2]
            # later: provide list and randomly choose index for resize
            size = np.random.randint(self.short_edge_length[0] , self.short_edge_length[1] + 1 )
            if size == 0:
                return img
            scale = size * 1.0 / min(h , w )
            if h < w:
                newh, neww = size, scale * w
            else:
                newh, neww = scale * h, size
            if max(newh , neww ) > self.max_size:
                scale = self.max_size * 1.0 / max(newh , neww )
                newh = newh * scale
                neww = neww * scale
            neww = int(neww + 0.5 )
            newh = int(newh + 0.5 )
            if img.dtype == np.uint8:
                pil_image = Image.fromarray(img )
                pil_image = pil_image.resize((neww, newh) , PILImageResampling.BILINEAR )
                img = np.asarray(pil_image )
            else:
                img = img.permute(2 , 0 , 1 ).unsqueeze(0 )  # hw(c) -> nchw
                img = nn.functional.interpolate(
                    img , (newh, neww) , mode=self.interp_method , align_corners=False ).squeeze(0 )
            img_augs.append(img )
        return img_augs
class Preprocess:
    def __init__(self , cfg ):
        '''simple docstring'''
        self.aug = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST] , cfg.INPUT.MAX_SIZE_TEST )
        self.input_format = cfg.INPUT.FORMAT
        self.size_divisibility = cfg.SIZE_DIVISIBILITY
        self.pad_value = cfg.PAD_VALUE
        self.max_image_size = cfg.INPUT.MAX_SIZE_TEST
        self.device = cfg.MODEL.DEVICE
        self.pixel_std = torch.tensor(cfg.MODEL.PIXEL_STD ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 )
        self.pixel_mean = torch.tensor(cfg.MODEL.PIXEL_MEAN ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 )
        self.normalizer = lambda x: (x - self.pixel_mean) / self.pixel_std
    def pad(self , images ):
        '''simple docstring'''
        max_size = tuple(max(s ) for s in zip(*[img.shape for img in images] ) )
        image_sizes = [im.shape[-2:] for im in images]
        images = [
            nn.functional.pad(
                im , [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]] , value=self.pad_value , )
            for size, im in zip(image_sizes , images )
        ]
        return torch.stack(images ), torch.tensor(image_sizes )
    def __call__(self , images , single_image=False ):
        '''simple docstring'''
        with torch.no_grad():
            if not isinstance(images , list ):
                images = [images]
            if single_image:
                assert len(images ) == 1
            for i in range(len(images ) ):
                if isinstance(images[i] , torch.Tensor ):
                    images.insert(i , images.pop(i ).to(self.device ).float() )
                elif not isinstance(images[i] , torch.Tensor ):
                    images.insert(
                        i , torch.as_tensor(img_tensorize(images.pop(i ) , input_format=self.input_format ) )
                        .to(self.device )
                        .float() , )
            # resize smallest edge
            raw_sizes = torch.tensor([im.shape[:2] for im in images] )
            images = self.aug(images )
            # transpose images and convert to torch tensors
            # images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images]
            # now normalize before pad to avoid useless arithmetic
            images = [self.normalizer(x ) for x in images]
            # now pad them to do the following operations
            images, sizes = self.pad(images )
            # Normalize
            if self.size_divisibility > 0:
                raise NotImplementedError()
            # pad
            scales_yx = torch.true_divide(raw_sizes , sizes )
if single_image:
return images[0], sizes[0], scales_yx[0]
else:
return images, sizes, scales_yx
def _scale_box(boxes , scale_yx ):
    '''simple docstring'''
    boxes[:, 0::2] *= scale_yx[:, 1]
    boxes[:, 1::2] *= scale_yx[:, 0]
    return boxes
def _clip_box(tensor , box_size: Tuple[int, int] ):
    '''simple docstring'''
    assert torch.isfinite(tensor ).all(), "Box tensor contains infinite or NaN!"
    h, w = box_size
    tensor[:, 0].clamp_(min=0 , max=w )
    tensor[:, 1].clamp_(min=0 , max=h )
    tensor[:, 2].clamp_(min=0 , max=w )
    tensor[:, 3].clamp_(min=0 , max=h )
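# Hedged numeric example of the two helpers above: boxes are (x0, y0, x1, y1)
# rows and scale_yx holds (scale_y, scale_x) per image, so columns 0 and 2
# (x-coordinates) are multiplied by scale_x and columns 1 and 3 by scale_y.
#   boxes = torch.tensor([[10., 20., 30., 40.]])
#   scale_yx = torch.tensor([[0.5, 2.0]])      # halve y, double x
#   _scale_box(boxes, scale_yx)  ->  [[20., 10., 60., 20.]]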
"""simple docstring"""
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.linear_model import LinearRegression
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
# Fitting Polynomial Regression to the dataset
from sklearn.preprocessing import PolynomialFeatures
# Importing the dataset
dataset = pd.read_csv(
    """https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/"""
    """position_salaries.csv"""
)
X = dataset.iloc[:, 1:2].values
y = dataset.iloc[:, 2].values
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
poly_reg = PolynomialFeatures(degree=4)
X_poly = poly_reg.fit_transform(X)
pol_reg = LinearRegression()
pol_reg.fit(X_poly, y)
def viz_polymonial():
    '''simple docstring'''
    plt.scatter(X , y , color="red" )
    plt.plot(X , pol_reg.predict(poly_reg.fit_transform(X ) ) , color="blue" )
plt.title("Truth or Bluff (Linear Regression)" )
plt.xlabel("Position level" )
plt.ylabel("Salary" )
plt.show()
if __name__ == "__main__":
viz_polymonial()
# Predicting a new result with Polymonial Regression
pol_reg.predict(poly_reg.fit_transform([[5.5]]))
# output should be 132148.43750003
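# Hedged note on the approach above: PolynomialFeatures(degree=4) expands a
# scalar position level x into [1, x, x**2, x**3, x**4], so fitting plain
# LinearRegression on the expanded matrix is ordinary least squares on a
# quartic polynomial.  An equivalent, more compact formulation (standard
# scikit-learn API, shown for illustration):
#   from sklearn.pipeline import make_pipeline
#   model = make_pipeline(PolynomialFeatures(degree=4), LinearRegression())
#   model.fit(X, y); model.predict([[5.5]])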
"""simple docstring"""
import qiskit
def half_adder(bit0: int , bit1: int )-> qiskit.result.counts.Counts:
    '''simple docstring'''
    simulator = qiskit.Aer.get_backend("aer_simulator" )
    qc_ha = qiskit.QuantumCircuit(4 , 2 )
    # encode inputs in qubits 0 and 1
    if bit0 == 1:
        qc_ha.x(0 )
    if bit1 == 1:
        qc_ha.x(1 )
    qc_ha.barrier()
    # use cnots to write XOR of the inputs on qubit2
    qc_ha.cx(0 , 2 )
    qc_ha.cx(1 , 2 )
    # use ccx / toffoli gate to write AND of the inputs on qubit3
    qc_ha.ccx(0 , 1 , 3 )
    qc_ha.barrier()
    # extract outputs
    qc_ha.measure(2 , 0 )  # extract XOR value
    qc_ha.measure(3 , 1 )  # extract AND value
    # Execute the circuit on the qasm simulator
    job = qiskit.execute(qc_ha , simulator , shots=1000 )
    # Return the histogram data of the results of the experiment
    return job.result().get_counts(qc_ha )
if __name__ == "__main__":
    counts = half_adder(1, 1)
    print(F"""Half Adder Output Qubit Counts: {counts}""")
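# Hedged truth table for the circuit above (classical bit 1 is the carry/AND,
# classical bit 0 is the sum/XOR, so qiskit count keys read "carry,sum"):
#   half_adder(0, 0) -> {'00': 1000}
#   half_adder(0, 1) -> {'01': 1000}
#   half_adder(1, 0) -> {'01': 1000}
#   half_adder(1, 1) -> {'10': 1000}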
"""simple docstring"""
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision.transforms import functional as F
from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCAmelCase : str = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
_lowerCAmelCase : str = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F"""transformer.encoder.layers.{i}.self_attn.out_proj.weight""", F"""encoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(F"""transformer.encoder.layers.{i}.self_attn.out_proj.bias""", F"""encoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.weight""", F"""encoder.layers.{i}.fc1.weight"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.bias""", F"""encoder.layers.{i}.fc1.bias"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.weight""", F"""encoder.layers.{i}.fc2.weight"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.bias""", F"""encoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(F"""transformer.encoder.layers.{i}.norm1.weight""", F"""encoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((F"""transformer.encoder.layers.{i}.norm1.bias""", F"""encoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.weight""", F"""encoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.bias""", F"""encoder.layers.{i}.final_layer_norm.bias"""))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(F"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""", F"""decoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", F"""decoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append(
(
F"""transformer.decoder.layers.{i}.multihead_attn.out_proj.weight""",
F"""decoder.layers.{i}.encoder_attn.out_proj.weight""",
)
)
rename_keys.append(
(
F"""transformer.decoder.layers.{i}.multihead_attn.out_proj.bias""",
F"""decoder.layers.{i}.encoder_attn.out_proj.bias""",
)
)
rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.weight""", F"""decoder.layers.{i}.fc1.weight"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.bias""", F"""decoder.layers.{i}.fc1.bias"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.weight""", F"""decoder.layers.{i}.fc2.weight"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.bias""", F"""decoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm1.weight""", F"""decoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((F"""transformer.decoder.layers.{i}.norm1.bias""", F"""decoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm2.weight""", F"""decoder.layers.{i}.encoder_attn_layer_norm.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm2.bias""", F"""decoder.layers.{i}.encoder_attn_layer_norm.bias""")
)
rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.weight""", F"""decoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.bias""", F"""decoder.layers.{i}.final_layer_norm.bias"""))
# convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
("""input_proj.weight""", """input_projection.weight"""),
("""input_proj.bias""", """input_projection.bias"""),
("""query_embed.weight""", """query_position_embeddings.weight"""),
("""transformer.encoder.norm.weight""", """encoder.layernorm.weight"""),
("""transformer.encoder.norm.bias""", """encoder.layernorm.bias"""),
("""transformer.decoder.norm.weight""", """decoder.layernorm.weight"""),
("""transformer.decoder.norm.bias""", """decoder.layernorm.bias"""),
("""class_embed.weight""", """class_labels_classifier.weight"""),
("""class_embed.bias""", """class_labels_classifier.bias"""),
("""bbox_embed.layers.0.weight""", """bbox_predictor.layers.0.weight"""),
("""bbox_embed.layers.0.bias""", """bbox_predictor.layers.0.bias"""),
("""bbox_embed.layers.1.weight""", """bbox_predictor.layers.1.weight"""),
("""bbox_embed.layers.1.bias""", """bbox_predictor.layers.1.bias"""),
("""bbox_embed.layers.2.weight""", """bbox_predictor.layers.2.weight"""),
("""bbox_embed.layers.2.bias""", """bbox_predictor.layers.2.bias"""),
]
)
def rename_key(state_dict , old , new ):
    '''simple docstring'''
    val = state_dict.pop(old )
    state_dict[new] = val
def rename_backbone_keys(state_dict ):
    '''simple docstring'''
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body" , "backbone.conv_encoder.model" )
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict
def SCREAMING_SNAKE_CASE__ ( snake_case : Optional[Any] )-> List[str]:
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = ""
# first: transformer encoder
for i in range(6 ):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
UpperCAmelCase__ : Optional[int] = state_dict.pop(f'{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight' )
UpperCAmelCase__ : str = state_dict.pop(f'{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias' )
# next, add query, keys and values (in that order) to the state dict
UpperCAmelCase__ : Optional[int] = in_proj_weight[:256, :]
UpperCAmelCase__ : int = in_proj_bias[:256]
UpperCAmelCase__ : List[str] = in_proj_weight[256:512, :]
UpperCAmelCase__ : List[Any] = in_proj_bias[256:512]
UpperCAmelCase__ : Optional[int] = in_proj_weight[-256:, :]
UpperCAmelCase__ : str = in_proj_bias[-256:]
# next: transformer decoder (which is a bit more complex because it also includes cross-attention)
for i in range(6 ):
# read in weights + bias of input projection layer of self-attention
UpperCAmelCase__ : Tuple = state_dict.pop(f'{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight' )
UpperCAmelCase__ : Union[str, Any] = state_dict.pop(f'{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias' )
# next, add query, keys and values (in that order) to the state dict
UpperCAmelCase__ : Dict = in_proj_weight[:256, :]
UpperCAmelCase__ : Tuple = in_proj_bias[:256]
UpperCAmelCase__ : Union[str, Any] = in_proj_weight[256:512, :]
UpperCAmelCase__ : Optional[Any] = in_proj_bias[256:512]
UpperCAmelCase__ : List[Any] = in_proj_weight[-256:, :]
UpperCAmelCase__ : List[Any] = in_proj_bias[-256:]
# read in weights + bias of input projection layer of cross-attention
UpperCAmelCase__ : Dict = state_dict.pop(
f'{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight' )
UpperCAmelCase__ : Dict = state_dict.pop(f'{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias' )
# next, add query, keys and values (in that order) of cross-attention to the state dict
UpperCAmelCase__ : List[str] = in_proj_weight_cross_attn[:256, :]
UpperCAmelCase__ : str = in_proj_bias_cross_attn[:256]
UpperCAmelCase__ : Tuple = in_proj_weight_cross_attn[256:512, :]
UpperCAmelCase__ : List[Any] = in_proj_bias_cross_attn[256:512]
UpperCAmelCase__ : int = in_proj_weight_cross_attn[-256:, :]
UpperCAmelCase__ : Optional[Any] = in_proj_bias_cross_attn[-256:]
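# Hedged note on the slicing above: PyTorch's nn.MultiheadAttention stores the
# query/key/value input projections as one stacked (3*d, d) matrix.  With
# d = 256 here, rows [:256] are Q, rows [256:512] are K and the last 256 rows
# ([-256:]) are V; the function splits them back into the separate
# q_proj/k_proj/v_proj weights and biases that the HF implementation expects.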
def resize(image , checkpoint_url ):
    '''simple docstring'''
    width, height = image.size
    current_max_size = max(width , height )
    target_max_size = 800 if "detection" in checkpoint_url else 1000
    scale = target_max_size / current_max_size
    resized_image = image.resize((int(round(scale * width ) ), int(round(scale * height ) )) )
    return resized_image
def normalize(image ):
    '''simple docstring'''
    image = F.to_tensor(image )
    image = F.normalize(image , mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] )
    return image
@torch.no_grad()
def SCREAMING_SNAKE_CASE__ ( snake_case : Optional[int] , snake_case : Any , snake_case : Optional[int] )-> Any:
'''simple docstring'''
logger.info("Converting model..." )
# load original state dict
UpperCAmelCase__ : Optional[int] = torch.hub.load_state_dict_from_url(snake_case , map_location="cpu" )
# rename keys
for src, dest in rename_keys:
rename_key(snake_case , snake_case , snake_case )
UpperCAmelCase__ : str = rename_backbone_keys(snake_case )
# query, key and value matrices need special treatment
read_in_q_k_v(snake_case )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
UpperCAmelCase__ : Dict = "model."
for key in state_dict.copy().keys():
if not key.startswith("class_labels_classifier" ) and not key.startswith("bbox_predictor" ):
UpperCAmelCase__ : Optional[Any] = state_dict.pop(snake_case )
UpperCAmelCase__ : List[str] = val
# create HuggingFace model and load state dict
UpperCAmelCase__ : Optional[int] = TableTransformerConfig(
backbone="resnet18" , mask_loss_coefficient=1 , dice_loss_coefficient=1 , ce_loss_coefficient=1 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , eos_coefficient=0.4 , class_cost=1 , bbox_cost=5 , giou_cost=2 , )
if "detection" in checkpoint_url:
UpperCAmelCase__ : List[Any] = 15
UpperCAmelCase__ : Any = 2
UpperCAmelCase__ : Dict = {0: "table", 1: "table rotated"}
UpperCAmelCase__ : Dict = idalabel
UpperCAmelCase__ : List[str] = {v: k for k, v in idalabel.items()}
else:
UpperCAmelCase__ : Any = 125
UpperCAmelCase__ : Dict = 6
UpperCAmelCase__ : int = {
0: "table",
1: "table column",
2: "table row",
3: "table column header",
4: "table projected row header",
5: "table spanning cell",
}
UpperCAmelCase__ : str = idalabel
UpperCAmelCase__ : Union[str, Any] = {v: k for k, v in idalabel.items()}
UpperCAmelCase__ : Any = DetrImageProcessor(
format="coco_detection" , max_size=800 if "detection" in checkpoint_url else 1000 )
UpperCAmelCase__ : Optional[Any] = TableTransformerForObjectDetection(snake_case )
model.load_state_dict(snake_case )
model.eval()
# verify our conversion
UpperCAmelCase__ : Optional[Any] = "example_pdf.png" if "detection" in checkpoint_url else "example_table.png"
UpperCAmelCase__ : str = hf_hub_download(repo_id="nielsr/example-pdf" , repo_type="dataset" , filename=snake_case )
UpperCAmelCase__ : Optional[Any] = Image.open(snake_case ).convert("RGB" )
UpperCAmelCase__ : Optional[Any] = normalize(resize(snake_case , snake_case ) ).unsqueeze(0 )
UpperCAmelCase__ : str = model(snake_case )
if "detection" in checkpoint_url:
UpperCAmelCase__ : Optional[Any] = (1, 15, 3)
UpperCAmelCase__ : List[Any] = torch.tensor(
[[-6.7897, -16.9985, 6.7937], [-8.0186, -22.2192, 6.9677], [-7.3117, -21.0708, 7.4055]] )
UpperCAmelCase__ : int = torch.tensor([[0.4867, 0.1767, 0.6732], [0.6718, 0.4479, 0.3830], [0.4716, 0.1760, 0.6364]] )
else:
UpperCAmelCase__ : List[str] = (1, 125, 7)
UpperCAmelCase__ : List[Any] = torch.tensor(
[[-18.1430, -8.3214, 4.8274], [-18.4685, -7.1361, -4.2667], [-26.3693, -9.3429, -4.9962]] )
UpperCAmelCase__ : List[Any] = torch.tensor([[0.4983, 0.5595, 0.9440], [0.4916, 0.6315, 0.5954], [0.6108, 0.8637, 0.1135]] )
assert outputs.logits.shape == expected_shape
assert torch.allclose(outputs.logits[0, :3, :3] , snake_case , atol=1E-4 )
assert torch.allclose(outputs.pred_boxes[0, :3, :3] , snake_case , atol=1E-4 )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
# Save model and image processor
logger.info(f'Saving PyTorch model and image processor to {pytorch_dump_folder_path}...' )
Path(snake_case ).mkdir(exist_ok=snake_case )
model.save_pretrained(snake_case )
image_processor.save_pretrained(snake_case )
if push_to_hub:
# Push model to HF hub
logger.info("Pushing model to the hub..." )
UpperCAmelCase__ : List[Any] = (
"microsoft/table-transformer-detection"
if "detection" in checkpoint_url
else "microsoft/table-transformer-structure-recognition"
)
model.push_to_hub(snake_case )
image_processor.push_to_hub(snake_case )
if __name__ == "__main__":
_lowerCAmelCase : Optional[int] = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_url""",
default="""https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth""",
type=str,
choices=[
"""https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth""",
"""https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth""",
],
help="""URL of the Table Transformer checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
_lowerCAmelCase : Dict = parser.parse_args()
convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
"""simple docstring"""
from typing import List
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase : Dict = logging.get_logger(__name__)
_lowerCAmelCase : Union[str, Any] = {
"""snap-research/efficientformer-l1-300""": (
"""https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json"""
),
}
class lowerCAmelCase__ ( __magic_name__ ):
SCREAMING_SNAKE_CASE_ ='''efficientformer'''
def __init__( self : List[Any] , snake_case__ : List[int] = [3, 2, 6, 4] , snake_case__ : List[int] = [4_8, 9_6, 2_2_4, 4_4_8] , snake_case__ : List[bool] = [True, True, True, True] , snake_case__ : int = 4_4_8 , snake_case__ : int = 3_2 , snake_case__ : int = 4 , snake_case__ : int = 7 , snake_case__ : int = 5 , snake_case__ : int = 8 , snake_case__ : int = 4 , snake_case__ : float = 0.0 , snake_case__ : int = 1_6 , snake_case__ : int = 3 , snake_case__ : int = 3 , snake_case__ : int = 3 , snake_case__ : int = 2 , snake_case__ : int = 1 , snake_case__ : float = 0.0 , snake_case__ : int = 1 , snake_case__ : bool = True , snake_case__ : bool = True , snake_case__ : float = 1e-5 , snake_case__ : str = "gelu" , snake_case__ : float = 0.02 , snake_case__ : float = 1e-12 , snake_case__ : int = 2_2_4 , snake_case__ : float = 1e-05 , **snake_case__ : str , ):
'''simple docstring'''
super().__init__(**snake_case__ )
UpperCAmelCase__ : int = hidden_act
UpperCAmelCase__ : Optional[int] = hidden_dropout_prob
UpperCAmelCase__ : List[str] = hidden_sizes
UpperCAmelCase__ : Union[str, Any] = num_hidden_layers
UpperCAmelCase__ : int = num_attention_heads
UpperCAmelCase__ : List[Any] = initializer_range
UpperCAmelCase__ : List[Any] = layer_norm_eps
UpperCAmelCase__ : Optional[int] = patch_size
UpperCAmelCase__ : Tuple = num_channels
UpperCAmelCase__ : Optional[int] = depths
UpperCAmelCase__ : Union[str, Any] = mlp_expansion_ratio
UpperCAmelCase__ : Dict = downsamples
UpperCAmelCase__ : Any = dim
UpperCAmelCase__ : str = key_dim
UpperCAmelCase__ : List[Any] = attention_ratio
UpperCAmelCase__ : Optional[Any] = resolution
UpperCAmelCase__ : Optional[Any] = pool_size
UpperCAmelCase__ : Any = downsample_patch_size
UpperCAmelCase__ : int = downsample_stride
UpperCAmelCase__ : Dict = downsample_pad
UpperCAmelCase__ : List[Any] = drop_path_rate
UpperCAmelCase__ : Optional[Any] = num_metaad_blocks
UpperCAmelCase__ : List[str] = distillation
UpperCAmelCase__ : Dict = use_layer_scale
UpperCAmelCase__ : List[Any] = layer_scale_init_value
UpperCAmelCase__ : Optional[Any] = image_size
UpperCAmelCase__ : Optional[int] = batch_norm_eps
"""simple docstring"""
def is_unique(input_str: str )-> bool:
    '''simple docstring'''
    bitmap = 0
    for ch in input_str:
        ch_unicode = ord(ch )
        ch_bit_index_on = pow(2 , ch_unicode )
        # If we already turned on bit for current character's unicode
        if bitmap >> ch_unicode & 1 == 1:
            return False
        bitmap |= ch_bit_index_on
    return True
if __name__ == "__main__":
import doctest
doctest.testmod()
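# Hedged worked example of the bitmap trick above (the function name
# is_unique is a reconstruction): each character turns on one bit of an
# arbitrary-precision int, so a repeat is caught when its bit is already set.
# For "aa", ord("a") = 97 sets bit 97 on the first pass and the second "a"
# finds bitmap >> 97 & 1 == 1, returning False; "abc" never hits a set
# bit and returns True.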
"""simple docstring"""
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def log_results(result: Dataset , args: Dict[str, str] ):
    '''simple docstring'''
    log_outputs = args.log_outputs
    dataset_id = "_".join(args.dataset.split("/" ) + [args.config, args.split] )
    # load metric
    wer = load_metric("wer" )
    cer = load_metric("cer" )
    # compute metrics
    wer_result = wer.compute(references=result["target"] , predictions=result["prediction"] )
    cer_result = cer.compute(references=result["target"] , predictions=result["prediction"] )
    # print & log results
    result_str = f'WER: {wer_result}\nCER: {cer_result}'
    print(result_str )
    with open(f'{dataset_id}_eval_results.txt' , "w" ) as f:
        f.write(result_str )
    # log all results in text file. Possibly interesting for analysis
    if log_outputs is not None:
        pred_file = f'log_{dataset_id}_predictions.txt'
        target_file = f'log_{dataset_id}_targets.txt'
        with open(pred_file , "w" ) as p, open(target_file , "w" ) as t:
            # mapping function to write output
            def write_to_file(batch , i ):
                p.write(f'{i}' + "\n" )
                p.write(batch["prediction"] + "\n" )
                t.write(f'{i}' + "\n" )
                t.write(batch["target"] + "\n" )
            result.map(write_to_file , with_indices=True )
def normalize_text(text: str )-> str:
    '''simple docstring'''
    chars_to_ignore_regex = "[,?.!\-\;\:\"“%‘”�—’…–]"  # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
    text = re.sub(chars_to_ignore_regex , "" , text.lower() )
    # In addition, we can normalize the target text, e.g. removing new lines characters etc...
    # note that order is important here!
    token_sequences_to_ignore = ["\n\n", "\n", "   ", "  "]
    for t in token_sequences_to_ignore:
        text = " ".join(text.split(t ) )
    return text
def main(args ):
    '''simple docstring'''
    dataset = load_dataset(args.dataset , args.config , split=args.split , use_auth_token=True )
    # for testing: only process the first two examples as a test
    # dataset = dataset.select(range(10))
    # load processor
    feature_extractor = AutoFeatureExtractor.from_pretrained(args.model_id )
    sampling_rate = feature_extractor.sampling_rate
    # resample audio
    dataset = dataset.cast_column("audio" , Audio(sampling_rate=sampling_rate ) )
    # load eval pipeline
    if args.device is None:
        args.device = 0 if torch.cuda.is_available() else -1
    asr = pipeline("automatic-speech-recognition" , model=args.model_id , device=args.device )
    # map function to decode audio
    def map_to_pred(batch ):
        prediction = asr(
            batch["audio"]["array"] , chunk_length_s=args.chunk_length_s , stride_length_s=args.stride_length_s )
        batch["prediction"] = prediction["text"]
        batch["target"] = normalize_text(batch["sentence"] )
        return batch
    # run inference on all examples
    result = dataset.map(map_to_pred , remove_columns=dataset.column_names )
    # compute and log_results
    # do not change function below
    log_results(result , args )
if __name__ == "__main__":
_lowerCAmelCase : Any = argparse.ArgumentParser()
parser.add_argument(
"""--model_id""", type=str, required=True, help="""Model identifier. Should be loadable with 🤗 Transformers"""
)
parser.add_argument(
"""--dataset""",
type=str,
required=True,
help="""Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets""",
)
parser.add_argument(
"""--config""", type=str, required=True, help="""Config of the dataset. *E.g.* `'en'` for Common Voice"""
)
parser.add_argument("""--split""", type=str, required=True, help="""Split of the dataset. *E.g.* `'test'`""")
parser.add_argument(
"""--chunk_length_s""", type=float, default=None, help="""Chunk length in seconds. Defaults to 5 seconds."""
)
parser.add_argument(
"""--stride_length_s""", type=float, default=None, help="""Stride of the audio chunks. Defaults to 1 second."""
)
parser.add_argument(
"""--log_outputs""", action="""store_true""", help="""If defined, write outputs to log file for analysis."""
)
parser.add_argument(
"""--device""",
type=int,
default=None,
help="""The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.""",
)
_lowerCAmelCase : Tuple = parser.parse_args()
main(args)
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import tensorflow as tf
    from transformers import AutoTokenizer, TFAutoModelForSeq2SeqLM
@require_tf
@require_sentencepiece
@require_tokenizers
class TFMT5ModelIntegrationTest(unittest.TestCase ):
    @slow
    def test_small_integration_test(self ):
        '''simple docstring'''
        model = TFAutoModelForSeq2SeqLM.from_pretrained("google/mt5-small" )
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small" )
        input_ids = tokenizer("Hello there" , return_tensors="tf" ).input_ids
        labels = tokenizer("Hi I am" , return_tensors="tf" ).input_ids
        loss = model(input_ids , labels=labels ).loss
        mtf_score = -tf.math.reduce_mean(loss ).numpy()
        EXPECTED_SCORE = -21.228168
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 2e-4 )
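# Hedged note on the assertion above: with labels supplied, the TF model
# returns an unreduced per-token loss (negative log-likelihood), so
# -tf.math.reduce_mean(loss) is the mean token log-likelihood, compared here
# against a precomputed reference value within a ~2e-4 tolerance.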
"""simple docstring"""
import inspect
import unittest
from datasets import load_dataset
from packaging import version
from transformers import BeitConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_MAPPING,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
)
from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
import PIL
from PIL import Image
from transformers import BeitImageProcessor
class lowerCAmelCase__ :
def __init__( self : Any , snake_case__ : Union[str, Any] , snake_case__ : str=1_0_0 , snake_case__ : str=1_3 , snake_case__ : Optional[int]=3_0 , snake_case__ : List[Any]=2 , snake_case__ : Any=3 , snake_case__ : Union[str, Any]=True , snake_case__ : List[Any]=True , snake_case__ : Any=3_2 , snake_case__ : List[str]=4 , snake_case__ : Any=4 , snake_case__ : Dict=3_7 , snake_case__ : str="gelu" , snake_case__ : Union[str, Any]=0.1 , snake_case__ : int=0.1 , snake_case__ : List[Any]=1_0 , snake_case__ : Any=0.02 , snake_case__ : List[str]=3 , snake_case__ : Tuple=None , snake_case__ : Tuple=[0, 1, 2, 3] , ):
'''simple docstring'''
UpperCAmelCase__ : int = parent
UpperCAmelCase__ : List[str] = 1_0_0
UpperCAmelCase__ : List[Any] = batch_size
UpperCAmelCase__ : int = image_size
UpperCAmelCase__ : List[Any] = patch_size
UpperCAmelCase__ : List[Any] = num_channels
UpperCAmelCase__ : Any = is_training
UpperCAmelCase__ : str = use_labels
UpperCAmelCase__ : Any = hidden_size
UpperCAmelCase__ : Dict = num_hidden_layers
UpperCAmelCase__ : int = num_attention_heads
UpperCAmelCase__ : Tuple = intermediate_size
UpperCAmelCase__ : Any = hidden_act
UpperCAmelCase__ : Optional[int] = hidden_dropout_prob
UpperCAmelCase__ : str = attention_probs_dropout_prob
UpperCAmelCase__ : Optional[int] = type_sequence_label_size
UpperCAmelCase__ : Any = initializer_range
UpperCAmelCase__ : Any = scope
UpperCAmelCase__ : Optional[Any] = out_indices
UpperCAmelCase__ : int = num_labels
# in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
UpperCAmelCase__ : List[Any] = (image_size // patch_size) ** 2
UpperCAmelCase__ : Optional[int] = num_patches + 1
def __a ( self : Dict ):
'''simple docstring'''
UpperCAmelCase__ : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase__ : str = None
UpperCAmelCase__ : Optional[int] = None
if self.use_labels:
UpperCAmelCase__ : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase__ : Any = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
UpperCAmelCase__ : Tuple = self.get_config()
return config, pixel_values, labels, pixel_labels
def __a ( self : int ):
'''simple docstring'''
return BeitConfig(
vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=snake_case__ , initializer_range=self.initializer_range , out_indices=self.out_indices , )
def __a ( self : int , snake_case__ : str , snake_case__ : str , snake_case__ : Dict , snake_case__ : List[str] ):
'''simple docstring'''
UpperCAmelCase__ : Dict = BeitModel(config=snake_case__ )
model.to(snake_case__ )
model.eval()
UpperCAmelCase__ : Dict = model(snake_case__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __a ( self : Any , snake_case__ : List[str] , snake_case__ : Union[str, Any] , snake_case__ : Dict , snake_case__ : Any ):
'''simple docstring'''
UpperCAmelCase__ : int = BeitForMaskedImageModeling(config=snake_case__ )
model.to(snake_case__ )
model.eval()
UpperCAmelCase__ : List[Any] = model(snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) )
def __a ( self : Optional[Any] , snake_case__ : Tuple , snake_case__ : Tuple , snake_case__ : str , snake_case__ : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = self.type_sequence_label_size
UpperCAmelCase__ : Union[str, Any] = BeitForImageClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
UpperCAmelCase__ : Union[str, Any] = model(snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
UpperCAmelCase__ : Any = 1
UpperCAmelCase__ : List[Any] = BeitForImageClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
UpperCAmelCase__ : Optional[int] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCAmelCase__ : Optional[Any] = model(snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __a ( self : Union[str, Any] , snake_case__ : int , snake_case__ : str , snake_case__ : Any , snake_case__ : Dict ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = self.num_labels
UpperCAmelCase__ : int = BeitForSemanticSegmentation(snake_case__ )
model.to(snake_case__ )
model.eval()
UpperCAmelCase__ : int = model(snake_case__ )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
UpperCAmelCase__ : Dict = model(snake_case__ , labels=snake_case__ )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
def __a ( self : int ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = self.prepare_config_and_inputs()
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : List[str] = config_and_inputs
UpperCAmelCase__ : Any = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class lowerCAmelCase__ ( __magic_name__ , __magic_name__ , unittest.TestCase ):
SCREAMING_SNAKE_CASE_ =(
(BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation)
if is_torch_available()
else ()
)
SCREAMING_SNAKE_CASE_ =(
{
'''feature-extraction''': BeitModel,
'''image-classification''': BeitForImageClassification,
'''image-segmentation''': BeitForSemanticSegmentation,
}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE_ =False
SCREAMING_SNAKE_CASE_ =False
SCREAMING_SNAKE_CASE_ =False
def __a ( self : Dict ):
'''simple docstring'''
UpperCAmelCase__ : Dict = BeitModelTester(self )
UpperCAmelCase__ : List[str] = ConfigTester(self , config_class=snake_case__ , has_text_modality=snake_case__ , hidden_size=3_7 )
def __a ( self : List[str] ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="BEiT does not use inputs_embeds" )
def __a ( self : List[Any] ):
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(reason="BEiT has some layers using `add_module` which doesn't work well with `nn.DataParallel`" )
def __a ( self : List[str] ):
'''simple docstring'''
pass
def __a ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ : Dict = model_class(snake_case__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
UpperCAmelCase__ : Tuple = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(snake_case__ , nn.Linear ) )
def __a ( self : str ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ : int = model_class(snake_case__ )
UpperCAmelCase__ : int = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase__ : str = [*signature.parameters.keys()]
UpperCAmelCase__ : int = ["pixel_values"]
self.assertListEqual(arg_names[:1] , snake_case__ )
def __a ( self : str ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case__ )
def __a ( self : Dict ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*snake_case__ )
def __a ( self : str ):
'''simple docstring'''
UpperCAmelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*snake_case__ )
def __a ( self : Dict ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*snake_case__ )
def __a ( self : List[Any] ):
'''simple docstring'''
if not self.model_tester.is_training:
return
UpperCAmelCase__ , UpperCAmelCase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase__ : Optional[int] = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if model_class in [*get_values(snake_case__ ), BeitForMaskedImageModeling]:
continue
UpperCAmelCase__ : Optional[Any] = model_class(snake_case__ )
model.to(snake_case__ )
model.train()
UpperCAmelCase__ : Optional[int] = self._prepare_for_class(snake_case__ , snake_case__ , return_labels=snake_case__ )
UpperCAmelCase__ : Tuple = model(**snake_case__ ).loss
loss.backward()
def __a ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
UpperCAmelCase__ : Optional[int] = False
UpperCAmelCase__ : List[str] = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if (
model_class in [*get_values(snake_case__ ), BeitForMaskedImageModeling]
or not model_class.supports_gradient_checkpointing
):
continue
UpperCAmelCase__ : List[Any] = model_class(snake_case__ )
model.gradient_checkpointing_enable()
model.to(snake_case__ )
model.train()
UpperCAmelCase__ : Dict = self._prepare_for_class(snake_case__ , snake_case__ , return_labels=snake_case__ )
UpperCAmelCase__ : Optional[Any] = model(**snake_case__ ).loss
loss.backward()
def __a ( self : str ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase__ : Union[str, Any] = _config_zero_init(snake_case__ )
for model_class in self.all_model_classes:
UpperCAmelCase__ : int = model_class(config=snake_case__ )
for name, param in model.named_parameters():
# we skip lambda parameters as these require special initial values
# determined by config.layer_scale_init_value
if "lambda" in name:
continue
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f'Parameter {name} of model {model_class} seems not properly initialized' , )
@slow
def __a ( self : Any ):
'''simple docstring'''
for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase__ : Optional[Any] = BeitModel.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
def prepare_img( )-> Optional[Any]:
    '''simple docstring'''
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
    return image
@require_torch
@require_vision
class lowerCAmelCase__ ( unittest.TestCase ):
@cached_property
def __a ( self : Union[str, Any] ):
'''simple docstring'''
return BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224" ) if is_vision_available() else None
@slow
def __a ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = BeitForMaskedImageModeling.from_pretrained("microsoft/beit-base-patch16-224-pt22k" ).to(snake_case__ )
UpperCAmelCase__ : int = self.default_image_processor
UpperCAmelCase__ : List[Any] = prepare_img()
UpperCAmelCase__ : Dict = image_processor(images=snake_case__ , return_tensors="pt" ).pixel_values.to(snake_case__ )
# prepare bool_masked_pos
UpperCAmelCase__ : Union[str, Any] = torch.ones((1, 1_9_6) , dtype=torch.bool ).to(snake_case__ )
# forward pass
with torch.no_grad():
UpperCAmelCase__ : Optional[int] = model(pixel_values=snake_case__ , bool_masked_pos=snake_case__ )
UpperCAmelCase__ : str = outputs.logits
# verify the logits
UpperCAmelCase__ : int = torch.Size((1, 1_9_6, 8_1_9_2) )
self.assertEqual(logits.shape , snake_case__ )
UpperCAmelCase__ : Any = torch.tensor(
[[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]] ).to(snake_case__ )
self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3] , snake_case__ , atol=1e-2 ) )
@slow
def __a ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = BeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224" ).to(snake_case__ )
UpperCAmelCase__ : Tuple = self.default_image_processor
UpperCAmelCase__ : Dict = prepare_img()
UpperCAmelCase__ : Tuple = image_processor(images=snake_case__ , return_tensors="pt" ).to(snake_case__ )
# forward pass
with torch.no_grad():
UpperCAmelCase__ : Union[str, Any] = model(**snake_case__ )
UpperCAmelCase__ : Any = outputs.logits
# verify the logits
UpperCAmelCase__ : Optional[Any] = torch.Size((1, 1_0_0_0) )
self.assertEqual(logits.shape , snake_case__ )
UpperCAmelCase__ : Optional[Any] = torch.tensor([-1.2385, -1.0987, -1.0108] ).to(snake_case__ )
self.assertTrue(torch.allclose(logits[0, :3] , snake_case__ , atol=1e-4 ) )
UpperCAmelCase__ : List[str] = 2_8_1
self.assertEqual(logits.argmax(-1 ).item() , snake_case__ )
@slow
def __a ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : int = BeitForImageClassification.from_pretrained("microsoft/beit-large-patch16-224-pt22k-ft22k" ).to(
snake_case__ )
UpperCAmelCase__ : Tuple = self.default_image_processor
UpperCAmelCase__ : Any = prepare_img()
UpperCAmelCase__ : Union[str, Any] = image_processor(images=snake_case__ , return_tensors="pt" ).to(snake_case__ )
# forward pass
with torch.no_grad():
UpperCAmelCase__ : List[Any] = model(**snake_case__ )
UpperCAmelCase__ : int = outputs.logits
# verify the logits
UpperCAmelCase__ : int = torch.Size((1, 2_1_8_4_1) )
self.assertEqual(logits.shape , snake_case__ )
UpperCAmelCase__ : int = torch.tensor([1.6881, -0.2787, 0.5901] ).to(snake_case__ )
self.assertTrue(torch.allclose(logits[0, :3] , snake_case__ , atol=1e-4 ) )
UpperCAmelCase__ : Any = 2_3_9_6
self.assertEqual(logits.argmax(-1 ).item() , snake_case__ )
@slow
def __a ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ : Dict = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640" )
UpperCAmelCase__ : List[Any] = model.to(snake_case__ )
UpperCAmelCase__ : int = BeitImageProcessor(do_resize=snake_case__ , size=6_4_0 , do_center_crop=snake_case__ )
UpperCAmelCase__ : Any = load_dataset("hf-internal-testing/fixtures_ade20k" , split="test" )
UpperCAmelCase__ : List[Any] = Image.open(ds[0]["file"] )
UpperCAmelCase__ : str = image_processor(images=snake_case__ , return_tensors="pt" ).to(snake_case__ )
# forward pass
with torch.no_grad():
UpperCAmelCase__ : List[str] = model(**snake_case__ )
UpperCAmelCase__ : Dict = outputs.logits
# verify the logits
UpperCAmelCase__ : Any = torch.Size((1, 1_5_0, 1_6_0, 1_6_0) )
self.assertEqual(logits.shape , snake_case__ )
UpperCAmelCase__ : List[str] = version.parse(PIL.__version__ ) < version.parse("9.0.0" )
if is_pillow_less_than_a:
UpperCAmelCase__ : Optional[Any] = torch.tensor(
[
[[-4.9225, -2.3954, -3.0522], [-2.8822, -1.0046, -1.7561], [-2.9549, -1.3228, -2.1347]],
[[-5.8168, -3.4129, -4.0778], [-3.8651, -2.2214, -3.0277], [-3.8356, -2.4643, -3.3535]],
[[-0.0078, 3.9952, 4.0754], [2.9856, 4.6944, 5.0035], [3.2413, 4.7813, 4.9969]],
] , device=snake_case__ , )
else:
UpperCAmelCase__ : int = torch.tensor(
[
[[-4.8960, -2.3688, -3.0355], [-2.8478, -0.9836, -1.7418], [-2.9449, -1.3332, -2.1456]],
[[-5.8081, -3.4124, -4.1006], [-3.8561, -2.2081, -3.0323], [-3.8365, -2.4601, -3.3669]],
[[-0.0309, 3.9868, 4.0540], [2.9640, 4.6877, 4.9976], [3.2081, 4.7690, 4.9942]],
] , device=snake_case__ , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , snake_case__ , atol=1e-4 ) )
@slow
def __a ( self : Any ):
'''simple docstring'''
UpperCAmelCase__ : str = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640" )
UpperCAmelCase__ : Any = model.to(snake_case__ )
UpperCAmelCase__ : Dict = BeitImageProcessor(do_resize=snake_case__ , size=6_4_0 , do_center_crop=snake_case__ )
UpperCAmelCase__ : Tuple = load_dataset("hf-internal-testing/fixtures_ade20k" , split="test" )
UpperCAmelCase__ : Optional[int] = Image.open(ds[0]["file"] )
UpperCAmelCase__ : Optional[int] = image_processor(images=snake_case__ , return_tensors="pt" ).to(snake_case__ )
# forward pass
with torch.no_grad():
UpperCAmelCase__ : Optional[int] = model(**snake_case__ )
UpperCAmelCase__ : int = outputs.logits.detach().cpu()
UpperCAmelCase__ : str = image_processor.post_process_semantic_segmentation(outputs=snake_case__ , target_sizes=[(5_0_0, 3_0_0)] )
UpperCAmelCase__ : List[Any] = torch.Size((5_0_0, 3_0_0) )
self.assertEqual(segmentation[0].shape , snake_case__ )
UpperCAmelCase__ : Any = image_processor.post_process_semantic_segmentation(outputs=snake_case__ )
UpperCAmelCase__ : int = torch.Size((1_6_0, 1_6_0) )
self.assertEqual(segmentation[0].shape , snake_case__ )
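# Minimal usage sketch mirroring the masked-image-modeling check above
# (hedged: checkpoint name as used in the slow test, image path illustrative):
#
#     from PIL import Image
#     import torch
#     from transformers import BeitImageProcessor, BeitForMaskedImageModeling
#
#     processor = BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224-pt22k")
#     model = BeitForMaskedImageModeling.from_pretrained("microsoft/beit-base-patch16-224-pt22k")
#     pixel_values = processor(images=Image.open("cats.png"), return_tensors="pt").pixel_values
#     bool_masked_pos = torch.ones((1, 196), dtype=torch.bool)  # mask all 14x14 = 196 patches
#     with torch.no_grad():
#         logits = model(pixel_values=pixel_values, bool_masked_pos=bool_masked_pos).logits
#     # logits has shape (1, 196, 8192): one 8192-way visual-token prediction per masked patch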
| 298
| 1
|
"""simple docstring"""
import argparse
import logging
from collections import namedtuple
import torch
from model_bertabs import BertAbsSummarizer
from models.model_builder import AbsSummarizer # The authors' implementation
from transformers import BertTokenizer
logging.basicConfig(level=logging.INFO)
_lowerCAmelCase : List[Any] = logging.getLogger(__name__)
_lowerCAmelCase : List[str] = """Hello world! cécé herlolip"""
_lowerCAmelCase : List[str] = namedtuple(
"""BertAbsConfig""",
[
"""temp_dir""",
"""large""",
"""use_bert_emb""",
"""finetune_bert""",
"""encoder""",
"""share_emb""",
"""max_pos""",
"""enc_layers""",
"""enc_hidden_size""",
"""enc_heads""",
"""enc_ff_size""",
"""enc_dropout""",
"""dec_layers""",
"""dec_hidden_size""",
"""dec_heads""",
"""dec_ff_size""",
"""dec_dropout""",
],
)
def SCREAMING_SNAKE_CASE__ ( snake_case : List[Any] , snake_case : List[Any] )-> Optional[int]:
'''simple docstring'''
UpperCAmelCase__ : Dict = BertAbsConfig(
temp_dir="." , finetune_bert=snake_case , large=snake_case , share_emb=snake_case , use_bert_emb=snake_case , encoder="bert" , max_pos=512 , enc_layers=6 , enc_hidden_size=512 , enc_heads=8 , enc_ff_size=512 , enc_dropout=0.2 , dec_layers=6 , dec_hidden_size=768 , dec_heads=8 , dec_ff_size=2048 , dec_dropout=0.2 , )
    UpperCAmelCase__ : Dict = torch.load(snake_case , lambda storage , loc : storage )
UpperCAmelCase__ : Optional[int] = AbsSummarizer(snake_case , torch.device("cpu" ) , snake_case )
original.eval()
UpperCAmelCase__ : List[str] = BertAbsSummarizer(snake_case , torch.device("cpu" ) )
new_model.eval()
# -------------------
# Convert the weights
# -------------------
logging.info("convert the model" )
new_model.bert.load_state_dict(original.bert.state_dict() )
new_model.decoder.load_state_dict(original.decoder.state_dict() )
new_model.generator.load_state_dict(original.generator.state_dict() )
# ----------------------------------
    # Make sure the outputs are identical
# ----------------------------------
logging.info("Make sure that the models' outputs are identical" )
UpperCAmelCase__ : Optional[int] = BertTokenizer.from_pretrained("bert-base-uncased" )
# prepare the model inputs
UpperCAmelCase__ : Union[str, Any] = tokenizer.encode("This is sample éàalj'-." )
encoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(snake_case )) )
UpperCAmelCase__ : List[str] = torch.tensor(snake_case ).unsqueeze(0 )
UpperCAmelCase__ : List[Any] = tokenizer.encode("This is sample 3 éàalj'-." )
decoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(snake_case )) )
UpperCAmelCase__ : Dict = torch.tensor(snake_case ).unsqueeze(0 )
# failsafe to make sure the weights reset does not affect the
# loaded weights.
assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight ) ) == 0
# forward pass
UpperCAmelCase__ : Dict = encoder_input_ids
UpperCAmelCase__ : str = decoder_input_ids
UpperCAmelCase__ : List[Any] = None
UpperCAmelCase__ : List[Any] = None
UpperCAmelCase__ : int = None
UpperCAmelCase__ : List[str] = None
UpperCAmelCase__ : Tuple = None
    # The original model does not apply the generator layer immediately but rather in
# the beam search (where it combines softmax + linear layer). Since we already
# apply the softmax in our generation process we only apply the linear layer here.
# We make sure that the outputs of the full stack are identical
UpperCAmelCase__ : Optional[Any] = original(snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case )[0]
UpperCAmelCase__ : int = original.generator(snake_case )
UpperCAmelCase__ : Any = new_model(
snake_case , snake_case , snake_case , snake_case , snake_case )[0]
UpperCAmelCase__ : Dict = new_model.generator(snake_case )
UpperCAmelCase__ : List[str] = torch.max(torch.abs(output_converted_model - output_original_model ) ).item()
print("Maximum absolute difference beween weights: {:.2f}".format(snake_case ) )
UpperCAmelCase__ : Dict = torch.max(torch.abs(output_converted_generator - output_original_generator ) ).item()
print("Maximum absolute difference beween weights: {:.2f}".format(snake_case ) )
UpperCAmelCase__ : int = torch.allclose(snake_case , snake_case , atol=1E-3 )
if are_identical:
logging.info("all weights are equal up to 1e-3" )
else:
raise ValueError("the weights are different. The new model is likely different from the original one." )
# The model has been saved with torch.save(model) and this is bound to the exact
# directory structure. We save the state_dict instead.
logging.info("saving the model's state dictionary" )
torch.save(
new_model.state_dict() , "./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin" )
if __name__ == "__main__":
_lowerCAmelCase : Optional[int] = argparse.ArgumentParser()
parser.add_argument(
"""--bertabs_checkpoint_path""",
default=None,
type=str,
required=True,
help="""Path the official PyTorch dump.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=str,
required=True,
help="""Path to the output PyTorch model.""",
)
_lowerCAmelCase : Tuple = parser.parse_args()
convert_bertabs_checkpoints(
args.bertabs_checkpoint_path,
args.pytorch_dump_folder_path,
)
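# Example invocation (script file name and paths are placeholders):
#   python convert_bertabs_original_pytorch_checkpoint.py \
#       --bertabs_checkpoint_path /path/to/bertabs_cnndm_final.pt \
#       --pytorch_dump_folder_path ./bertabs-converted
# Note that the conversion function above currently saves to the hard-coded
# "./bertabs-finetuned-cnndm-extractive-abstractive-summarization/" path rather
# than to --pytorch_dump_folder_path.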
| 298
|
"""simple docstring"""
import functools
def min_distance_up_bottom( worda : str , wordb : str )-> int:
    '''simple docstring'''
    len_worda = len(worda )
    len_wordb = len(wordb )
    @functools.cache
    def min_distance( indexa : int , indexb : int ) -> int:
        # if the first word's index overflows - delete all remaining characters from the second word
        if indexa >= len_worda:
            return len_wordb - indexb
        # if the second word's index overflows - delete all remaining characters from the first word
        if indexb >= len_wordb:
            return len_worda - indexa
        diff = int(worda[indexa] != wordb[indexb] )  # current letters not identical
        return min(
            1 + min_distance(indexa + 1 , indexb ) , 1 + min_distance(indexa , indexb + 1 ) , diff + min_distance(indexa + 1 , indexb + 1 ) , )
    return min_distance(0 , 0 )
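# Usage sketch (the function and variable names above are my reconstruction of
# the obfuscated originals):
#     min_distance_up_bottom("horse" , "ros" )  # -> 3: replace 'h'->'r', delete 'r', delete 'e'
#     min_distance_up_bottom("" , "abc" )       # -> 3: three insertions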
if __name__ == "__main__":
import doctest
doctest.testmod()
| 298
| 1
|
"""simple docstring"""
def sum_of_series( first_term : int , common_diff : int , num_of_terms : int )-> float:
    '''simple docstring'''
    total = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)
    # formula for sum of series
    return total
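# Closed form used above: S_n = (n / 2) * (2 * a + (n - 1) * d) for first term a,
# common difference d and n terms; e.g. sum_of_series(1 , 1 , 10 ) = (10 / 2) * (2 + 9) = 55.0.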
def main( )-> None:
    '''simple docstring'''
    print(sum_of_series(1 , 1 , 10 ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 298
|
"""simple docstring"""
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class lowerCAmelCase__ ( __magic_name__ ):
def __a ( self : List[Any] , snake_case__ : str ):
'''simple docstring'''
with open(snake_case__ , encoding="utf-8" ) as input_file:
UpperCAmelCase__ : List[Any] = re.compile(R"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)" )
UpperCAmelCase__ : Tuple = input_file.read()
UpperCAmelCase__ : Tuple = regexp.search(snake_case__ )
return match
def __a ( self : List[str] , snake_case__ : str ):
'''simple docstring'''
with open(snake_case__ , encoding="utf-8" ) as input_file:
UpperCAmelCase__ : Union[str, Any] = re.compile(R"#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()" , re.DOTALL )
UpperCAmelCase__ : Dict = input_file.read()
# use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
UpperCAmelCase__ : int = regexp.finditer(snake_case__ )
UpperCAmelCase__ : Dict = [match for match in matches if match is not None and match.group(1 ) is not None]
return matches[0] if matches else None
def __a ( self : Any ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = Path("./datasets" )
UpperCAmelCase__ : Any = list(dataset_paths.absolute().glob("**/*.py" ) )
for dataset in dataset_files:
if self._no_encoding_on_file_open(str(snake_case__ ) ):
raise AssertionError(f'open(...) must use utf-8 encoding in {dataset}' )
def __a ( self : Dict ):
'''simple docstring'''
UpperCAmelCase__ : Any = Path("./datasets" )
UpperCAmelCase__ : int = list(dataset_paths.absolute().glob("**/*.py" ) )
for dataset in dataset_files:
if self._no_print_statements(str(snake_case__ ) ):
raise AssertionError(f'print statement found in {dataset}. Use datasets.logger/logging instead.' )
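# Illustrative behaviour of the two checks above (my reading of the regexes):
# `_no_encoding_on_file_open` flags `open(path)` but not
# `open(path, encoding="utf-8")` or binary modes such as `open(path, "rb")`;
# `_no_print_statements` flags a bare `print(...)` while ignoring prints that
# only occur inside comments, string literals or docstrings.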
| 298
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_mctct""": ["""MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MCTCTConfig"""],
"""feature_extraction_mctct""": ["""MCTCTFeatureExtractor"""],
"""processing_mctct""": ["""MCTCTProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mctct"] = [
"""MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MCTCTForCTC""",
"""MCTCTModel""",
"""MCTCTPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
_lowerCAmelCase : Any = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
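# Note: `_LazyModule` defers the real submodule imports until first attribute
# access on this package, so importing e.g. `MCTCTConfig` only loads
# `configuration_mctct`, while the torch-only modeling classes are never
# imported at all when torch is unavailable.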
| 298
|
"""simple docstring"""
import numpy as np
import datasets
_lowerCAmelCase : Optional[int] = """
Compute the Mahalanobis Distance
Mahalonobis distance is the distance between a point and a distribution.
And not between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance.
It was introduced by Prof. P. C. Mahalanobis in 1936
and has been used in various statistical applications ever since
[source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/]
"""
_lowerCAmelCase : Tuple = """\
@article{de2000mahalanobis,
title={The mahalanobis distance},
author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L},
journal={Chemometrics and intelligent laboratory systems},
volume={50},
number={1},
pages={1--18},
year={2000},
publisher={Elsevier}
}
"""
_lowerCAmelCase : Optional[int] = """
Args:
X: List of datapoints to be compared with the `reference_distribution`.
reference_distribution: List of datapoints from the reference distribution we want to compare to.
Returns:
mahalanobis: The Mahalonobis distance for each datapoint in `X`.
Examples:
>>> mahalanobis_metric = datasets.load_metric(\"mahalanobis\")
>>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]])
>>> print(results)
{'mahalanobis': array([0.5])}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCAmelCase__ ( datasets.Metric ):
def __a ( self : Any ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"X": datasets.Sequence(datasets.Value("float" , id="sequence" ) , id="X" ),
} ) , )
def __a ( self : Union[str, Any] , snake_case__ : List[str] , snake_case__ : Any ):
'''simple docstring'''
# convert to numpy arrays
UpperCAmelCase__ : Union[str, Any] = np.array(snake_case__ )
UpperCAmelCase__ : Union[str, Any] = np.array(snake_case__ )
# Assert that arrays are 2D
if len(X.shape ) != 2:
raise ValueError("Expected `X` to be a 2D vector" )
if len(reference_distribution.shape ) != 2:
raise ValueError("Expected `reference_distribution` to be a 2D vector" )
if reference_distribution.shape[0] < 2:
raise ValueError(
"Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension" )
# Get mahalanobis distance for each prediction
UpperCAmelCase__ : Optional[Any] = X - np.mean(snake_case__ )
UpperCAmelCase__ : Tuple = np.cov(reference_distribution.T )
try:
UpperCAmelCase__ : str = np.linalg.inv(snake_case__ )
except np.linalg.LinAlgError:
UpperCAmelCase__ : Optional[Any] = np.linalg.pinv(snake_case__ )
UpperCAmelCase__ : List[Any] = np.dot(snake_case__ , snake_case__ )
UpperCAmelCase__ : Tuple = np.dot(snake_case__ , X_minus_mu.T ).diagonal()
return {"mahalanobis": mahal_dist}
| 298
| 1
|
"""simple docstring"""
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
FPaRecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
is_abit_bnb_available,
is_aim_available,
is_bfaa_available,
is_bnb_available,
is_botoa_available,
is_ccl_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_fpa_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
convert_outputs_to_fpaa,
convert_to_fpaa,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
from .bnb import has_abit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
prepare_sagemager_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
TaTrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
| 298
|
"""simple docstring"""
import gc
import random
import unittest
import torch
from diffusers import (
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class lowerCAmelCase__ ( __magic_name__ , __magic_name__ , unittest.TestCase ):
SCREAMING_SNAKE_CASE_ =IFPipeline
SCREAMING_SNAKE_CASE_ =TEXT_TO_IMAGE_PARAMS - {'''width''', '''height''', '''latents'''}
SCREAMING_SNAKE_CASE_ =TEXT_TO_IMAGE_BATCH_PARAMS
SCREAMING_SNAKE_CASE_ =PipelineTesterMixin.required_optional_params - {'''latents'''}
def __a ( self : Dict ):
'''simple docstring'''
return self._get_dummy_components()
def __a ( self : Any , snake_case__ : Dict , snake_case__ : Optional[Any]=0 ):
'''simple docstring'''
if str(snake_case__ ).startswith("mps" ):
UpperCAmelCase__ : str = torch.manual_seed(snake_case__ )
else:
UpperCAmelCase__ : Optional[int] = torch.Generator(device=snake_case__ ).manual_seed(snake_case__ )
UpperCAmelCase__ : Tuple = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
def __a ( self : Tuple ):
'''simple docstring'''
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != "cuda" , reason="float16 requires CUDA" )
def __a ( self : Tuple ):
'''simple docstring'''
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1e-1 )
def __a ( self : Dict ):
'''simple docstring'''
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )
def __a ( self : int ):
'''simple docstring'''
self._test_save_load_local()
def __a ( self : Any ):
'''simple docstring'''
self._test_inference_batch_single_identical(
expected_max_diff=1e-2 , )
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def __a ( self : Optional[Any] ):
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
@slow
@require_torch_gpu
class lowerCAmelCase__ ( unittest.TestCase ):
def __a ( self : str ):
'''simple docstring'''
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __a ( self : Tuple ):
'''simple docstring'''
        # text-to-image (base "IF" pipeline)
UpperCAmelCase__ : Any = IFPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0" , variant="fp16" , torch_dtype=torch.floataa )
UpperCAmelCase__ : Union[str, Any] = IFSuperResolutionPipeline.from_pretrained(
"DeepFloyd/IF-II-L-v1.0" , variant="fp16" , torch_dtype=torch.floataa , text_encoder=snake_case__ , tokenizer=snake_case__ )
# pre compute text embeddings and remove T5 to save memory
pipe_a.text_encoder.to("cuda" )
UpperCAmelCase__ , UpperCAmelCase__ : Any = pipe_a.encode_prompt("anime turtle" , device="cuda" )
del pipe_a.tokenizer
del pipe_a.text_encoder
gc.collect()
UpperCAmelCase__ : Tuple = None
UpperCAmelCase__ : List[Any] = None
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# img2img
UpperCAmelCase__ : List[str] = IFImgaImgPipeline(**pipe_a.components )
UpperCAmelCase__ : List[str] = IFImgaImgSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_imgaimg(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# inpainting
UpperCAmelCase__ : List[str] = IFInpaintingPipeline(**pipe_a.components )
UpperCAmelCase__ : List[str] = IFInpaintingSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_inpainting(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
def __a ( self : List[str] , snake_case__ : Tuple , snake_case__ : List[Any] , snake_case__ : List[str] , snake_case__ : List[Any] ):
'''simple docstring'''
# pipeline 1
_start_torch_memory_measurement()
UpperCAmelCase__ : List[str] = torch.Generator(device="cpu" ).manual_seed(0 )
UpperCAmelCase__ : Dict = pipe_a(
prompt_embeds=snake_case__ , negative_prompt_embeds=snake_case__ , num_inference_steps=2 , generator=snake_case__ , output_type="np" , )
UpperCAmelCase__ : List[Any] = output.images[0]
assert image.shape == (6_4, 6_4, 3)
UpperCAmelCase__ : Optional[Any] = torch.cuda.max_memory_allocated()
assert mem_bytes < 1_3 * 1_0**9
UpperCAmelCase__ : str = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy" )
assert_mean_pixel_difference(snake_case__ , snake_case__ )
# pipeline 2
_start_torch_memory_measurement()
UpperCAmelCase__ : Optional[int] = torch.Generator(device="cpu" ).manual_seed(0 )
UpperCAmelCase__ : Dict = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(0 ) ).to(snake_case__ )
UpperCAmelCase__ : str = pipe_a(
prompt_embeds=snake_case__ , negative_prompt_embeds=snake_case__ , image=snake_case__ , generator=snake_case__ , num_inference_steps=2 , output_type="np" , )
UpperCAmelCase__ : Union[str, Any] = output.images[0]
assert image.shape == (2_5_6, 2_5_6, 3)
UpperCAmelCase__ : List[str] = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 1_0**9
UpperCAmelCase__ : Dict = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy" )
assert_mean_pixel_difference(snake_case__ , snake_case__ )
def __a ( self : Optional[Any] , snake_case__ : Optional[int] , snake_case__ : Any , snake_case__ : Optional[Any] , snake_case__ : List[str] ):
'''simple docstring'''
# pipeline 1
_start_torch_memory_measurement()
UpperCAmelCase__ : List[str] = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(0 ) ).to(snake_case__ )
UpperCAmelCase__ : int = torch.Generator(device="cpu" ).manual_seed(0 )
UpperCAmelCase__ : Tuple = pipe_a(
prompt_embeds=snake_case__ , negative_prompt_embeds=snake_case__ , image=snake_case__ , num_inference_steps=2 , generator=snake_case__ , output_type="np" , )
UpperCAmelCase__ : str = output.images[0]
assert image.shape == (6_4, 6_4, 3)
UpperCAmelCase__ : Optional[Any] = torch.cuda.max_memory_allocated()
assert mem_bytes < 1_0 * 1_0**9
UpperCAmelCase__ : List[str] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy" )
assert_mean_pixel_difference(snake_case__ , snake_case__ )
# pipeline 2
_start_torch_memory_measurement()
UpperCAmelCase__ : int = torch.Generator(device="cpu" ).manual_seed(0 )
UpperCAmelCase__ : Optional[int] = floats_tensor((1, 3, 2_5_6, 2_5_6) , rng=random.Random(0 ) ).to(snake_case__ )
UpperCAmelCase__ : Tuple = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(0 ) ).to(snake_case__ )
UpperCAmelCase__ : Dict = pipe_a(
prompt_embeds=snake_case__ , negative_prompt_embeds=snake_case__ , image=snake_case__ , original_image=snake_case__ , generator=snake_case__ , num_inference_steps=2 , output_type="np" , )
UpperCAmelCase__ : Optional[Any] = output.images[0]
assert image.shape == (2_5_6, 2_5_6, 3)
UpperCAmelCase__ : Dict = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 1_0**9
UpperCAmelCase__ : str = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy" )
assert_mean_pixel_difference(snake_case__ , snake_case__ )
def __a ( self : Union[str, Any] , snake_case__ : List[str] , snake_case__ : int , snake_case__ : int , snake_case__ : Optional[int] ):
'''simple docstring'''
# pipeline 1
_start_torch_memory_measurement()
UpperCAmelCase__ : str = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(0 ) ).to(snake_case__ )
UpperCAmelCase__ : Dict = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(1 ) ).to(snake_case__ )
UpperCAmelCase__ : Optional[int] = torch.Generator(device="cpu" ).manual_seed(0 )
UpperCAmelCase__ : int = pipe_a(
prompt_embeds=snake_case__ , negative_prompt_embeds=snake_case__ , image=snake_case__ , mask_image=snake_case__ , num_inference_steps=2 , generator=snake_case__ , output_type="np" , )
UpperCAmelCase__ : int = output.images[0]
assert image.shape == (6_4, 6_4, 3)
UpperCAmelCase__ : Union[str, Any] = torch.cuda.max_memory_allocated()
assert mem_bytes < 1_0 * 1_0**9
UpperCAmelCase__ : int = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy" )
assert_mean_pixel_difference(snake_case__ , snake_case__ )
# pipeline 2
_start_torch_memory_measurement()
UpperCAmelCase__ : int = torch.Generator(device="cpu" ).manual_seed(0 )
UpperCAmelCase__ : Optional[Any] = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(0 ) ).to(snake_case__ )
UpperCAmelCase__ : Optional[int] = floats_tensor((1, 3, 2_5_6, 2_5_6) , rng=random.Random(0 ) ).to(snake_case__ )
UpperCAmelCase__ : List[Any] = floats_tensor((1, 3, 2_5_6, 2_5_6) , rng=random.Random(1 ) ).to(snake_case__ )
UpperCAmelCase__ : Union[str, Any] = pipe_a(
prompt_embeds=snake_case__ , negative_prompt_embeds=snake_case__ , image=snake_case__ , mask_image=snake_case__ , original_image=snake_case__ , generator=snake_case__ , num_inference_steps=2 , output_type="np" , )
UpperCAmelCase__ : Tuple = output.images[0]
assert image.shape == (2_5_6, 2_5_6, 3)
UpperCAmelCase__ : List[str] = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 1_0**9
UpperCAmelCase__ : List[Any] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy" )
assert_mean_pixel_difference(snake_case__ , snake_case__ )
def _start_torch_memory_measurement( )-> None:
'''simple docstring'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
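# The helper above resets CUDA memory accounting between pipeline stages, so each
# `torch.cuda.max_memory_allocated()` reading asserted in the tests (< 13 GB for
# the stage-I text-to-image run, < 10 GB for stage-I img2img/inpainting, < 4 GB
# for the stage-II super-resolution runs) reflects only the stage just executed.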
| 298
| 1
|
"""simple docstring"""
import sys
def matrix_chain_order( array : list )-> tuple:
    '''simple docstring'''
    n = len(array )
    matrix = [[0 for x in range(n )] for x in range(n )]
    sol = [[0 for x in range(n )] for x in range(n )]
    for chain_length in range(2 , n ):
        for a in range(1 , n - chain_length + 1 ):
            b = a + chain_length - 1
            matrix[a][b] = sys.maxsize
            for c in range(a , b ):
                cost = (
                    matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
                )
                if cost < matrix[a][b]:
                    matrix[a][b] = cost
                    sol[a][b] = c
    return matrix, sol
def print_optimal_solution( optimal_solution : list , i : int , j : int )-> None:
    '''simple docstring'''
    if i == j:
        print("A" + str(i ) , end=" " )
    else:
        print("(" , end=" " )
        print_optimal_solution(optimal_solution , i , optimal_solution[i][j] )
        print_optimal_solution(optimal_solution , optimal_solution[i][j] + 1 , j )
        print(")" , end=" " )
def main( )-> None:
    '''simple docstring'''
    array = [30, 35, 15, 5, 10, 20, 25]
    n = len(array )
    # Size of matrix created from above array will be
    # 30*35 35*15 15*5 5*10 10*20 20*25
    matrix , optimal_solution = matrix_chain_order(array )
    print("No. of Operation required: " + str(matrix[1][n - 1] ) )
    print_optimal_solution(optimal_solution , 1 , n - 1 )
if __name__ == "__main__":
main()
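# Expected output for the example dimensions above (the classic CLRS 15.2 instance):
#   No. of Operation required: 15125
#   ( ( A1 ( A2 A3 ) ) ( ( A4 A5 ) A6 ) )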
| 298
|
"""simple docstring"""
import os
import re
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_lowerCAmelCase : Optional[int] = logging.get_logger(__name__)
_lowerCAmelCase : Optional[int] = {
"""vocab_file""": """vocab.txt""",
"""merges_file""": """bpe.codes""",
}
_lowerCAmelCase : List[Any] = {
"""vocab_file""": {
"""vinai/phobert-base""": """https://huggingface.co/vinai/phobert-base/resolve/main/vocab.txt""",
"""vinai/phobert-large""": """https://huggingface.co/vinai/phobert-large/resolve/main/vocab.txt""",
},
"""merges_file""": {
"""vinai/phobert-base""": """https://huggingface.co/vinai/phobert-base/resolve/main/bpe.codes""",
"""vinai/phobert-large""": """https://huggingface.co/vinai/phobert-large/resolve/main/bpe.codes""",
},
}
_lowerCAmelCase : int = {
"""vinai/phobert-base""": 256,
"""vinai/phobert-large""": 256,
}
def get_pairs( word )-> set:
    '''simple docstring'''
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    pairs = set(pairs )
    return pairs
class lowerCAmelCase__ ( __magic_name__ ):
SCREAMING_SNAKE_CASE_ =VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE_ =PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE_ =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self : List[Any] , snake_case__ : int , snake_case__ : Union[str, Any] , snake_case__ : Tuple="<s>" , snake_case__ : List[Any]="</s>" , snake_case__ : Union[str, Any]="</s>" , snake_case__ : Union[str, Any]="<s>" , snake_case__ : Any="<unk>" , snake_case__ : int="<pad>" , snake_case__ : List[str]="<mask>" , **snake_case__ : Optional[int] , ):
'''simple docstring'''
super().__init__(
bos_token=snake_case__ , eos_token=snake_case__ , unk_token=snake_case__ , sep_token=snake_case__ , cls_token=snake_case__ , pad_token=snake_case__ , mask_token=snake_case__ , **snake_case__ , )
UpperCAmelCase__ : Dict = vocab_file
UpperCAmelCase__ : Tuple = merges_file
UpperCAmelCase__ : List[Any] = {}
UpperCAmelCase__ : Dict = 0
UpperCAmelCase__ : int = 1
UpperCAmelCase__ : Dict = 2
UpperCAmelCase__ : Dict = 3
self.add_from_file(snake_case__ )
UpperCAmelCase__ : Optional[Any] = {v: k for k, v in self.encoder.items()}
with open(snake_case__ , encoding="utf-8" ) as merges_handle:
UpperCAmelCase__ : Tuple = merges_handle.read().split("\n" )[:-1]
UpperCAmelCase__ : Optional[Any] = [tuple(merge.split()[:-1] ) for merge in merges]
UpperCAmelCase__ : List[Any] = dict(zip(snake_case__ , range(len(snake_case__ ) ) ) )
UpperCAmelCase__ : Dict = {}
def __a ( self : int , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None ):
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
UpperCAmelCase__ : str = [self.cls_token_id]
UpperCAmelCase__ : Any = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
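        # i.e. single sequence: <s> A </s> ; pair of sequences: <s> A </s></s> B </s>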
def __a ( self : List[str] , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None , snake_case__ : bool = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=snake_case__ , token_ids_a=snake_case__ , already_has_special_tokens=snake_case__ )
if token_ids_a is None:
return [1] + ([0] * len(snake_case__ )) + [1]
return [1] + ([0] * len(snake_case__ )) + [1, 1] + ([0] * len(snake_case__ )) + [1]
def __a ( self : Union[str, Any] , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = [self.sep_token_id]
UpperCAmelCase__ : str = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def __a ( self : List[str] ):
'''simple docstring'''
return len(self.encoder )
def __a ( self : Any ):
'''simple docstring'''
return dict(self.encoder , **self.added_tokens_encoder )
def __a ( self : Dict , snake_case__ : Tuple ):
'''simple docstring'''
if token in self.cache:
return self.cache[token]
UpperCAmelCase__ : Optional[Any] = tuple(snake_case__ )
UpperCAmelCase__ : Optional[Any] = tuple(list(word[:-1] ) + [word[-1] + "</w>"] )
UpperCAmelCase__ : Any = get_pairs(snake_case__ )
if not pairs:
return token
while True:
UpperCAmelCase__ : List[Any] = min(snake_case__ , key=lambda snake_case__ : self.bpe_ranks.get(snake_case__ , float("inf" ) ) )
if bigram not in self.bpe_ranks:
break
UpperCAmelCase__ , UpperCAmelCase__ : Tuple = bigram
UpperCAmelCase__ : Optional[Any] = []
UpperCAmelCase__ : Tuple = 0
while i < len(snake_case__ ):
try:
UpperCAmelCase__ : Union[str, Any] = word.index(snake_case__ , snake_case__ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
UpperCAmelCase__ : Dict = j
if word[i] == first and i < len(snake_case__ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
UpperCAmelCase__ : Dict = tuple(snake_case__ )
UpperCAmelCase__ : List[Any] = new_word
if len(snake_case__ ) == 1:
break
else:
UpperCAmelCase__ : Dict = get_pairs(snake_case__ )
UpperCAmelCase__ : List[Any] = "@@ ".join(snake_case__ )
UpperCAmelCase__ : Optional[int] = word[:-4]
UpperCAmelCase__ : Union[str, Any] = word
return word
def __a ( self : List[Any] , snake_case__ : Tuple ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = []
UpperCAmelCase__ : int = re.findall(R"\S+\n?" , snake_case__ )
for token in words:
split_tokens.extend(list(self.bpe(snake_case__ ).split(" " ) ) )
return split_tokens
def __a ( self : Dict , snake_case__ : List[str] ):
'''simple docstring'''
return self.encoder.get(snake_case__ , self.encoder.get(self.unk_token ) )
def __a ( self : List[Any] , snake_case__ : Any ):
'''simple docstring'''
return self.decoder.get(snake_case__ , self.unk_token )
def __a ( self : str , snake_case__ : Tuple ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = " ".join(snake_case__ ).replace("@@ " , "" ).strip()
return out_string
def __a ( self : Any , snake_case__ : str , snake_case__ : Optional[str] = None ):
'''simple docstring'''
if not os.path.isdir(snake_case__ ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
UpperCAmelCase__ : Tuple = os.path.join(
snake_case__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
UpperCAmelCase__ : str = os.path.join(
snake_case__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case__ ):
copyfile(self.vocab_file , snake_case__ )
if os.path.abspath(self.merges_file ) != os.path.abspath(snake_case__ ):
copyfile(self.merges_file , snake_case__ )
return out_vocab_file, out_merge_file
def __a ( self : List[Any] , snake_case__ : Union[str, Any] ):
'''simple docstring'''
        if isinstance(snake_case__ , str ):
            try:
                with open(snake_case__ , "r" , encoding="utf-8" ) as fd:
                    self.add_from_file(fd )
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception(f'Incorrect encoding detected in {snake_case__}, please rebuild the dataset' )
            return
        lines = snake_case__.readlines()
        for lineTmp in lines:
            line = lineTmp.strip()
            idx = line.rfind(" " )
            if idx == -1:
                raise ValueError("Incorrect dictionary format, expected '<token> <cnt>'" )
            word = line[:idx]
            self.encoder[word] = len(self.encoder )
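# Note on the "@@ " marker used by `bpe` and `convert_tokens_to_string` above:
# PhoBERT-style BPE tags non-final subword pieces with a trailing "@@"
# (illustratively, "unaffable" -> "un@@ aff@@ able"), and detokenization simply
# strips "@@ " to stitch words back together.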
| 298
| 1
|
"""simple docstring"""
import pprint
import requests
_lowerCAmelCase : List[str] = """https://zenquotes.io/api"""
def quote_of_the_day( )-> list:
    '''simple docstring'''
    return requests.get(API_ENDPOINT_URL + "/today" ).json()
def random_quotes( )-> list:
    '''simple docstring'''
    return requests.get(API_ENDPOINT_URL + "/random" ).json()
if __name__ == "__main__":
    response = random_quotes()
pprint.pprint(response)
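# Shape note (hedged, per zenquotes.io): both endpoints return a JSON list of
# quote objects, roughly [{"q": "<quote>", "a": "<author>", "h": "<html>"}].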
| 298
|
"""simple docstring"""
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class lowerCAmelCase__ :
SCREAMING_SNAKE_CASE_ =42
# setable values
SCREAMING_SNAKE_CASE_ =42
SCREAMING_SNAKE_CASE_ =42
SCREAMING_SNAKE_CASE_ =None
@classmethod
def __a ( cls : Optional[int] , snake_case__ : CommonSchedulerState , snake_case__ : jnp.ndarray , snake_case__ : jnp.ndarray ):
'''simple docstring'''
return cls(common=snake_case__ , init_noise_sigma=snake_case__ , timesteps=snake_case__ )
@dataclass
class lowerCAmelCase__ ( __magic_name__ ):
SCREAMING_SNAKE_CASE_ =42
class lowerCAmelCase__ ( __magic_name__ , __magic_name__ ):
SCREAMING_SNAKE_CASE_ =[e.name for e in FlaxKarrasDiffusionSchedulers]
SCREAMING_SNAKE_CASE_ =42
@property
def __a ( self : Union[str, Any] ):
'''simple docstring'''
return True
@register_to_config
def __init__( self : Tuple , snake_case__ : int = 1_0_0_0 , snake_case__ : float = 0.0001 , snake_case__ : float = 0.02 , snake_case__ : str = "linear" , snake_case__ : Optional[jnp.ndarray] = None , snake_case__ : str = "fixed_small" , snake_case__ : bool = True , snake_case__ : str = "epsilon" , snake_case__ : jnp.dtype = jnp.floataa , ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = dtype
def __a ( self : Any , snake_case__ : Optional[CommonSchedulerState] = None ):
'''simple docstring'''
if common is None:
UpperCAmelCase__ : Any = CommonSchedulerState.create(self )
# standard deviation of the initial noise distribution
UpperCAmelCase__ : Tuple = jnp.array(1.0 , dtype=self.dtype )
UpperCAmelCase__ : Optional[Any] = jnp.arange(0 , self.config.num_train_timesteps ).round()[::-1]
return DDPMSchedulerState.create(
common=snake_case__ , init_noise_sigma=snake_case__ , timesteps=snake_case__ , )
def __a ( self : int , snake_case__ : DDPMSchedulerState , snake_case__ : jnp.ndarray , snake_case__ : Optional[int] = None ):
'''simple docstring'''
return sample
def __a ( self : Dict , snake_case__ : DDPMSchedulerState , snake_case__ : int , snake_case__ : Tuple = () ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = self.config.num_train_timesteps // num_inference_steps
# creates integer timesteps by multiplying by ratio
# rounding to avoid issues when num_inference_step is power of 3
UpperCAmelCase__ : Tuple = (jnp.arange(0 , snake_case__ ) * step_ratio).round()[::-1]
return state.replace(
num_inference_steps=snake_case__ , timesteps=snake_case__ , )
def __a ( self : List[str] , snake_case__ : DDPMSchedulerState , snake_case__ : int , snake_case__ : Any=None , snake_case__ : Union[str, Any]=None ):
'''simple docstring'''
UpperCAmelCase__ : int = state.common.alphas_cumprod[t]
UpperCAmelCase__ : Optional[int] = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
# For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
# and sample from it to get previous sample
# x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
UpperCAmelCase__ : int = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]
if variance_type is None:
UpperCAmelCase__ : Union[str, Any] = self.config.variance_type
# hacks - were probably added for training stability
if variance_type == "fixed_small":
UpperCAmelCase__ : int = jnp.clip(snake_case__ , a_min=1e-20 )
# for rl-diffuser https://arxiv.org/abs/2205.09991
elif variance_type == "fixed_small_log":
UpperCAmelCase__ : Union[str, Any] = jnp.log(jnp.clip(snake_case__ , a_min=1e-20 ) )
elif variance_type == "fixed_large":
UpperCAmelCase__ : List[Any] = state.common.betas[t]
elif variance_type == "fixed_large_log":
# Glide max_log
UpperCAmelCase__ : Optional[int] = jnp.log(state.common.betas[t] )
elif variance_type == "learned":
return predicted_variance
elif variance_type == "learned_range":
UpperCAmelCase__ : List[str] = variance
UpperCAmelCase__ : Optional[Any] = state.common.betas[t]
UpperCAmelCase__ : Any = (predicted_variance + 1) / 2
UpperCAmelCase__ : Dict = frac * max_log + (1 - frac) * min_log
return variance
def __a ( self : Dict , snake_case__ : DDPMSchedulerState , snake_case__ : jnp.ndarray , snake_case__ : int , snake_case__ : jnp.ndarray , snake_case__ : Optional[jax.random.KeyArray] = None , snake_case__ : bool = True , ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = timestep
if key is None:
UpperCAmelCase__ : Optional[int] = jax.random.PRNGKey(0 )
if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = jnp.split(snake_case__ , sample.shape[1] , axis=1 )
else:
UpperCAmelCase__ : int = None
# 1. compute alphas, betas
UpperCAmelCase__ : Union[str, Any] = state.common.alphas_cumprod[t]
UpperCAmelCase__ : Optional[int] = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
UpperCAmelCase__ : List[str] = 1 - alpha_prod_t
UpperCAmelCase__ : List[str] = 1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if self.config.prediction_type == "epsilon":
UpperCAmelCase__ : Optional[int] = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif self.config.prediction_type == "sample":
UpperCAmelCase__ : List[Any] = model_output
elif self.config.prediction_type == "v_prediction":
UpperCAmelCase__ : int = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
else:
raise ValueError(
                f'prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` '
                "or `v_prediction` for the FlaxDDPMScheduler." )
# 3. Clip "predicted x_0"
if self.config.clip_sample:
UpperCAmelCase__ : Optional[Any] = jnp.clip(snake_case__ , -1 , 1 )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
UpperCAmelCase__ : Union[str, Any] = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
UpperCAmelCase__ : Tuple = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
UpperCAmelCase__ : Union[str, Any] = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
def random_variance():
UpperCAmelCase__ : List[str] = jax.random.split(snake_case__ , num=1 )
UpperCAmelCase__ : List[str] = jax.random.normal(snake_case__ , shape=model_output.shape , dtype=self.dtype )
return (self._get_variance(snake_case__ , snake_case__ , predicted_variance=snake_case__ ) ** 0.5) * noise
UpperCAmelCase__ : Optional[int] = jnp.where(t > 0 , random_variance() , jnp.zeros(model_output.shape , dtype=self.dtype ) )
UpperCAmelCase__ : Optional[Any] = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample, state)
return FlaxDDPMSchedulerOutput(prev_sample=snake_case__ , state=snake_case__ )
def __a ( self : List[Any] , snake_case__ : DDPMSchedulerState , snake_case__ : jnp.ndarray , snake_case__ : jnp.ndarray , snake_case__ : jnp.ndarray , ):
'''simple docstring'''
return add_noise_common(state.common , snake_case__ , snake_case__ , snake_case__ )
def __a ( self : Optional[int] , snake_case__ : DDPMSchedulerState , snake_case__ : jnp.ndarray , snake_case__ : jnp.ndarray , snake_case__ : jnp.ndarray , ):
'''simple docstring'''
return get_velocity_common(state.common , snake_case__ , snake_case__ , snake_case__ )
def __len__( self : Union[str, Any] ):
'''simple docstring'''
return self.config.num_train_timesteps
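# Recap of the posterior mean assembled in `step` above (DDPM paper, Eq. (7)):
#   mu_t(x_t, x_0) = (sqrt(alphabar_{t-1}) * beta_t / (1 - alphabar_t)) * x_0_pred
#                  + (sqrt(alpha_t) * (1 - alphabar_{t-1}) / (1 - alphabar_t)) * x_t
# i.e. exactly the `pred_original_sample_coeff` and `current_sample_coeff` terms.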
| 298
| 1
|
"""simple docstring"""
import os
from typing import Dict, List, Union
import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs
from .tokenization_gpta import GPTaTokenizer
class lowerCAmelCase__ ( tf.keras.layers.Layer ):
    def __init__( self , vocab : Dict[str, int] , merges : List[str] , max_length : int = None , pad_token_id : int = None ):
        '''simple docstring'''
        super().__init__()
        self.pad_token_id = pad_token_id
        self.max_length = max_length
        self.vocab = vocab
        self.merges = merges
        self.tf_tokenizer = BytePairTokenizer(vocab , merges , sequence_length=max_length )
@classmethod
    def from_tokenizer( cls , tokenizer : GPTaTokenizer , *args , **kwargs ):
        '''simple docstring'''
        merges = [" ".join(m ) for m in tokenizer.bpe_ranks.keys()]
        vocab = tokenizer.get_vocab()
        return cls(vocab , merges , *args , **kwargs )
@classmethod
    def from_pretrained( cls , pretrained_model_name_or_path : Union[str, os.PathLike] , *init_inputs , **kwargs ):
        '''simple docstring'''
        tokenizer = GPTaTokenizer.from_pretrained(pretrained_model_name_or_path , *init_inputs , **kwargs )
        return cls.from_tokenizer(tokenizer , *init_inputs , **kwargs )
@classmethod
    def from_config( cls , config : List[str] ):
        '''simple docstring'''
        return cls(**config )
    def get_config( self ):
'''simple docstring'''
return {
"vocab": self.vocab,
"merges": self.merges,
"max_length": self.max_length,
"pad_token_id": self.pad_token_id,
}
    def call( self , x : Any , max_length : int = None ):
        '''simple docstring'''
        input_ids = self.tf_tokenizer(x )
        attention_mask = tf.ones_like(input_ids )
        if self.pad_token_id is not None:
            # pad the tokens up to max length
            max_length = max_length if max_length is not None else self.max_length
            if max_length is not None:
                input_ids, attention_mask = pad_model_inputs(
                    input_ids , max_seq_length=max_length , pad_value=self.pad_token_id )
        return {"attention_mask": attention_mask, "input_ids": input_ids}
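# A minimal usage sketch (hedged: the class name TFGPTaTokenizer and the "gpt2"
# checkpoint id are assumptions, not confirmed by this file):
# tf_tokenizer = TFGPTaTokenizer.from_pretrained("gpt2")
# batch = tf_tokenizer(tf.constant(["hello world"]))
# batch["input_ids"], batch["attention_mask"]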
"""simple docstring"""
import inspect
import unittest
from transformers import DecisionTransformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import DecisionTransformerModel
from transformers.models.decision_transformer.modeling_decision_transformer import (
DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
class DecisionTransformerModelTester:
    def __init__( self , parent , batch_size=1_3 , seq_length=7 , act_dim=6 , state_dim=1_7 , hidden_size=2_3 , max_length=1_1 , is_training=True , ):
'''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.act_dim = act_dim
        self.state_dim = state_dim
        self.hidden_size = hidden_size
        self.max_length = max_length
        self.is_training = is_training
    def prepare_config_and_inputs( self ):
'''simple docstring'''
        states = floats_tensor((self.batch_size, self.seq_length, self.state_dim) )
        actions = floats_tensor((self.batch_size, self.seq_length, self.act_dim) )
        rewards = floats_tensor((self.batch_size, self.seq_length, 1) )
        returns_to_go = floats_tensor((self.batch_size, self.seq_length, 1) )
        timesteps = ids_tensor((self.batch_size, self.seq_length) , vocab_size=1_0_0_0 )
        attention_mask = random_attention_mask((self.batch_size, self.seq_length) )
        config = self.get_config()
return (
config,
states,
actions,
rewards,
returns_to_go,
timesteps,
attention_mask,
)
    def get_config( self ):
'''simple docstring'''
return DecisionTransformerConfig(
batch_size=self.batch_size , seq_length=self.seq_length , act_dim=self.act_dim , state_dim=self.state_dim , hidden_size=self.hidden_size , max_length=self.max_length , )
    def create_and_check_model( self , config , states , actions , rewards , returns_to_go , timesteps , attention_mask , ):
        '''simple docstring'''
        model = DecisionTransformerModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(states , actions , rewards , returns_to_go , timesteps , attention_mask )
self.parent.assertEqual(result.state_preds.shape , states.shape )
self.parent.assertEqual(result.action_preds.shape , actions.shape )
self.parent.assertEqual(result.return_preds.shape , returns_to_go.shape )
self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.seq_length * 3, self.hidden_size) )  # seq length * 3 as there are 3 modalities: states, returns and actions
    def prepare_config_and_inputs_for_common( self ):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            states,
            actions,
            rewards,
            returns_to_go,
            timesteps,
            attention_mask,
        ) = config_and_inputs
        inputs_dict = {
"states": states,
"actions": actions,
"rewards": rewards,
"returns_to_go": returns_to_go,
"timesteps": timesteps,
"attention_mask": attention_mask,
}
return config, inputs_dict
@require_torch
class DecisionTransformerModelTest( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (DecisionTransformerModel,) if is_torch_available() else ()
    all_generative_model_classes = ()
    pipeline_model_mapping = {"feature-extraction": DecisionTransformerModel} if is_torch_available() else {}
# Ignoring of a failing test from GenerationTesterMixin, as the model does not use inputs_ids
SCREAMING_SNAKE_CASE_ =False
# Ignoring of a failing tests from ModelTesterMixin, as the model does not implement these features
SCREAMING_SNAKE_CASE_ =False
SCREAMING_SNAKE_CASE_ =False
SCREAMING_SNAKE_CASE_ =False
SCREAMING_SNAKE_CASE_ =False
SCREAMING_SNAKE_CASE_ =False
SCREAMING_SNAKE_CASE_ =False
SCREAMING_SNAKE_CASE_ =False
SCREAMING_SNAKE_CASE_ =False
SCREAMING_SNAKE_CASE_ =False
    def setUp( self ):
        '''simple docstring'''
        self.model_tester = DecisionTransformerModelTester(self )
        self.config_tester = ConfigTester(self , config_class=DecisionTransformerConfig , hidden_size=3_7 )
    def test_config( self ):
        '''simple docstring'''
        self.config_tester.run_common_tests()
    def test_model( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
@slow
    def test_model_from_pretrained( self ):
        '''simple docstring'''
        for model_name in DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DecisionTransformerModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
    def test_forward_signature( self ):
        '''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = [
                "states",
                "actions",
                "rewards",
                "returns_to_go",
                "timesteps",
                "attention_mask",
            ]
            self.assertListEqual(arg_names[: len(expected_arg_names )] , expected_arg_names )
@require_torch
class DecisionTransformerModelIntegrationTest( unittest.TestCase ):
@slow
    def test_autoregressive_prediction( self ):
        '''simple docstring'''
        NUM_STEPS = 2  # number of steps of autoregressive prediction we will perform
        TARGET_RETURN = 1_0  # defined by the RL environment, may be normalized
        model = DecisionTransformerModel.from_pretrained("edbeeching/decision-transformer-gym-hopper-expert" )
        model = model.to(torch_device )
        config = model.config
        torch.manual_seed(0 )
        state = torch.randn(1 , 1 , config.state_dim ).to(device=torch_device , dtype=torch.float32 )  # env.reset()
        expected_outputs = torch.tensor(
            [[0.24_2793, -0.2869_3074, 0.874_2613], [0.6781_5274, -0.0810_1085, -0.1295_2147]] , device=torch_device )
        returns_to_go = torch.tensor(TARGET_RETURN , device=torch_device , dtype=torch.float32 ).reshape(1 , 1 , 1 )
        states = state
        actions = torch.zeros(1 , 0 , config.act_dim , device=torch_device , dtype=torch.float32 )
        rewards = torch.zeros(1 , 0 , device=torch_device , dtype=torch.float32 )
        timesteps = torch.tensor(0 , device=torch_device , dtype=torch.long ).reshape(1 , 1 )
        for step in range(NUM_STEPS ):
            actions = torch.cat([actions, torch.zeros(1 , 1 , config.act_dim , device=torch_device )] , dim=1 )
            rewards = torch.cat([rewards, torch.zeros(1 , 1 , device=torch_device )] , dim=1 )
            attention_mask = torch.ones(1 , states.shape[1] ).to(dtype=torch.long , device=states.device )
            with torch.no_grad():
                state_pred, action_pred, return_preds = model(
                    states=states , actions=actions , rewards=rewards , returns_to_go=returns_to_go , timesteps=timesteps , attention_mask=attention_mask , return_dict=False , )
self.assertEqual(action_pred.shape , actions.shape )
self.assertTrue(torch.allclose(action_pred[0, -1] , expected_outputs[step] , atol=1e-4 ) )
            state, reward, done, _ = (  # env.step(action)
                torch.randn(1 , 1 , config.state_dim ).to(device=torch_device , dtype=torch.float32 ),
                1.0,
                False,
                {},
            )
            actions[-1] = action_pred[0, -1]
            states = torch.cat([states, state] , dim=1 )
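            # Decision Transformer conditions on returns-to-go: subtracting the observed
            # reward shrinks the remaining target return before the next prediction step.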
            pred_return = returns_to_go[0, -1] - reward
            returns_to_go = torch.cat([returns_to_go, pred_return.reshape(1 , 1 , 1 )] , dim=1 )
            timesteps = torch.cat(
                [timesteps, torch.ones((1, 1) , device=torch_device , dtype=torch.long ) * (step + 1)] , dim=1 )
"""simple docstring"""
import inspect
import unittest
from datasets import load_dataset
from packaging import version
from transformers import BeitConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_MAPPING,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
)
from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
import PIL
from PIL import Image
from transformers import BeitImageProcessor
class BeitModelTester:
    def __init__( self , parent , vocab_size=1_0_0 , batch_size=1_3 , image_size=3_0 , patch_size=2 , num_channels=3 , is_training=True , use_labels=True , hidden_size=3_2 , num_hidden_layers=4 , num_attention_heads=4 , intermediate_size=3_7 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , type_sequence_label_size=1_0 , initializer_range=0.02 , num_labels=3 , scope=None , out_indices=[0, 1, 2, 3] , ):
'''simple docstring'''
        self.parent = parent
        self.vocab_size = 1_0_0
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.out_indices = out_indices
        self.num_labels = num_labels
        # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs( self ):
        '''simple docstring'''
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
        config = self.get_config()
        return config, pixel_values, labels, pixel_labels
    def get_config( self ):
'''simple docstring'''
return BeitConfig(
vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=snake_case__ , initializer_range=self.initializer_range , out_indices=self.out_indices , )
    def create_and_check_model( self , config , pixel_values , labels , pixel_labels ):
        '''simple docstring'''
        model = BeitModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_masked_lm( self , config , pixel_values , labels , pixel_labels ):
        '''simple docstring'''
        model = BeitForMaskedImageModeling(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) )
    def create_and_check_for_image_classification( self , config , pixel_values , labels , pixel_labels ):
        '''simple docstring'''
        config.num_labels = self.type_sequence_label_size
        model = BeitForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
        # test greyscale images
        config.num_channels = 1
        model = BeitForImageClassification(config )
        model.to(torch_device )
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        result = model(pixel_values , labels=labels )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
    def create_and_check_for_semantic_segmentation( self , config , pixel_values , labels , pixel_labels ):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = BeitForSemanticSegmentation(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(
            result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
        result = model(pixel_values , labels=pixel_labels )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
    def prepare_config_and_inputs_for_common( self ):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class BeitModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
        (BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": BeitModel,
            "image-classification": BeitForImageClassification,
            "image-segmentation": BeitForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )
SCREAMING_SNAKE_CASE_ =False
SCREAMING_SNAKE_CASE_ =False
SCREAMING_SNAKE_CASE_ =False
    def setUp( self ):
        '''simple docstring'''
        self.model_tester = BeitModelTester(self )
        self.config_tester = ConfigTester(self , config_class=BeitConfig , has_text_modality=False , hidden_size=3_7 )
def __a ( self : List[str] ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="BEiT does not use inputs_embeds" )
def __a ( self : List[Any] ):
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(reason="BEiT has some layers using `add_module` which doesn't work well with `nn.DataParallel`" )
def __a ( self : List[str] ):
'''simple docstring'''
pass
def __a ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ : Dict = model_class(snake_case__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
UpperCAmelCase__ : Tuple = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(snake_case__ , nn.Linear ) )
def __a ( self : str ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ : int = model_class(snake_case__ )
UpperCAmelCase__ : int = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase__ : str = [*signature.parameters.keys()]
UpperCAmelCase__ : int = ["pixel_values"]
self.assertListEqual(arg_names[:1] , snake_case__ )
def __a ( self : str ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case__ )
def __a ( self : Dict ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*snake_case__ )
def __a ( self : str ):
'''simple docstring'''
UpperCAmelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*snake_case__ )
def __a ( self : Dict ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*snake_case__ )
def __a ( self : List[Any] ):
'''simple docstring'''
if not self.model_tester.is_training:
return
UpperCAmelCase__ , UpperCAmelCase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase__ : Optional[int] = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if model_class in [*get_values(snake_case__ ), BeitForMaskedImageModeling]:
continue
UpperCAmelCase__ : Optional[Any] = model_class(snake_case__ )
model.to(snake_case__ )
model.train()
UpperCAmelCase__ : Optional[int] = self._prepare_for_class(snake_case__ , snake_case__ , return_labels=snake_case__ )
UpperCAmelCase__ : Tuple = model(**snake_case__ ).loss
loss.backward()
def __a ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
UpperCAmelCase__ : Optional[int] = False
UpperCAmelCase__ : List[str] = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if (
model_class in [*get_values(snake_case__ ), BeitForMaskedImageModeling]
or not model_class.supports_gradient_checkpointing
):
continue
UpperCAmelCase__ : List[Any] = model_class(snake_case__ )
model.gradient_checkpointing_enable()
model.to(snake_case__ )
model.train()
UpperCAmelCase__ : Dict = self._prepare_for_class(snake_case__ , snake_case__ , return_labels=snake_case__ )
UpperCAmelCase__ : Optional[Any] = model(**snake_case__ ).loss
loss.backward()
def __a ( self : str ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase__ : Union[str, Any] = _config_zero_init(snake_case__ )
for model_class in self.all_model_classes:
UpperCAmelCase__ : int = model_class(config=snake_case__ )
for name, param in model.named_parameters():
# we skip lambda parameters as these require special initial values
# determined by config.layer_scale_init_value
if "lambda" in name:
continue
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f'Parameter {name} of model {model_class} seems not properly initialized' , )
@slow
def __a ( self : Any ):
'''simple docstring'''
for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase__ : Optional[Any] = BeitModel.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
def prepare_img( )-> Optional[Any]:
    '''simple docstring'''
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class BeitModelIntegrationTest( unittest.TestCase ):
@cached_property
    def default_image_processor( self ):
'''simple docstring'''
return BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224" ) if is_vision_available() else None
@slow
def __a ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = BeitForMaskedImageModeling.from_pretrained("microsoft/beit-base-patch16-224-pt22k" ).to(snake_case__ )
UpperCAmelCase__ : int = self.default_image_processor
UpperCAmelCase__ : List[Any] = prepare_img()
UpperCAmelCase__ : Dict = image_processor(images=snake_case__ , return_tensors="pt" ).pixel_values.to(snake_case__ )
# prepare bool_masked_pos
UpperCAmelCase__ : Union[str, Any] = torch.ones((1, 1_9_6) , dtype=torch.bool ).to(snake_case__ )
# forward pass
with torch.no_grad():
UpperCAmelCase__ : Optional[int] = model(pixel_values=snake_case__ , bool_masked_pos=snake_case__ )
UpperCAmelCase__ : str = outputs.logits
# verify the logits
UpperCAmelCase__ : int = torch.Size((1, 1_9_6, 8_1_9_2) )
self.assertEqual(logits.shape , snake_case__ )
UpperCAmelCase__ : Any = torch.tensor(
[[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]] ).to(snake_case__ )
self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3] , snake_case__ , atol=1e-2 ) )
@slow
def __a ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = BeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224" ).to(snake_case__ )
UpperCAmelCase__ : Tuple = self.default_image_processor
UpperCAmelCase__ : Dict = prepare_img()
UpperCAmelCase__ : Tuple = image_processor(images=snake_case__ , return_tensors="pt" ).to(snake_case__ )
# forward pass
with torch.no_grad():
UpperCAmelCase__ : Union[str, Any] = model(**snake_case__ )
UpperCAmelCase__ : Any = outputs.logits
# verify the logits
UpperCAmelCase__ : Optional[Any] = torch.Size((1, 1_0_0_0) )
self.assertEqual(logits.shape , snake_case__ )
UpperCAmelCase__ : Optional[Any] = torch.tensor([-1.2385, -1.0987, -1.0108] ).to(snake_case__ )
self.assertTrue(torch.allclose(logits[0, :3] , snake_case__ , atol=1e-4 ) )
UpperCAmelCase__ : List[str] = 2_8_1
self.assertEqual(logits.argmax(-1 ).item() , snake_case__ )
@slow
def __a ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : int = BeitForImageClassification.from_pretrained("microsoft/beit-large-patch16-224-pt22k-ft22k" ).to(
snake_case__ )
UpperCAmelCase__ : Tuple = self.default_image_processor
UpperCAmelCase__ : Any = prepare_img()
UpperCAmelCase__ : Union[str, Any] = image_processor(images=snake_case__ , return_tensors="pt" ).to(snake_case__ )
# forward pass
with torch.no_grad():
UpperCAmelCase__ : List[Any] = model(**snake_case__ )
UpperCAmelCase__ : int = outputs.logits
# verify the logits
UpperCAmelCase__ : int = torch.Size((1, 2_1_8_4_1) )
self.assertEqual(logits.shape , snake_case__ )
UpperCAmelCase__ : int = torch.tensor([1.6881, -0.2787, 0.5901] ).to(snake_case__ )
self.assertTrue(torch.allclose(logits[0, :3] , snake_case__ , atol=1e-4 ) )
UpperCAmelCase__ : Any = 2_3_9_6
self.assertEqual(logits.argmax(-1 ).item() , snake_case__ )
@slow
def __a ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ : Dict = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640" )
UpperCAmelCase__ : List[Any] = model.to(snake_case__ )
UpperCAmelCase__ : int = BeitImageProcessor(do_resize=snake_case__ , size=6_4_0 , do_center_crop=snake_case__ )
UpperCAmelCase__ : Any = load_dataset("hf-internal-testing/fixtures_ade20k" , split="test" )
UpperCAmelCase__ : List[Any] = Image.open(ds[0]["file"] )
UpperCAmelCase__ : str = image_processor(images=snake_case__ , return_tensors="pt" ).to(snake_case__ )
# forward pass
with torch.no_grad():
UpperCAmelCase__ : List[str] = model(**snake_case__ )
UpperCAmelCase__ : Dict = outputs.logits
# verify the logits
UpperCAmelCase__ : Any = torch.Size((1, 1_5_0, 1_6_0, 1_6_0) )
self.assertEqual(logits.shape , snake_case__ )
        is_pillow_less_than_9 = version.parse(PIL.__version__ ) < version.parse("9.0.0" )
        if is_pillow_less_than_9:
            expected_slice = torch.tensor(
                [
                    [[-4.9225, -2.3954, -3.0522], [-2.8822, -1.0046, -1.7561], [-2.9549, -1.3228, -2.1347]],
                    [[-5.8168, -3.4129, -4.0778], [-3.8651, -2.2214, -3.0277], [-3.8356, -2.4643, -3.3535]],
                    [[-0.0078, 3.9952, 4.0754], [2.9856, 4.6944, 5.0035], [3.2413, 4.7813, 4.9969]],
                ] , device=torch_device , )
        else:
            expected_slice = torch.tensor(
                [
                    [[-4.8960, -2.3688, -3.0355], [-2.8478, -0.9836, -1.7418], [-2.9449, -1.3332, -2.1456]],
                    [[-5.8081, -3.4124, -4.1006], [-3.8561, -2.2081, -3.0323], [-3.8365, -2.4601, -3.3669]],
                    [[-0.0309, 3.9868, 4.0540], [2.9640, 4.6877, 4.9976], [3.2081, 4.7690, 4.9942]],
                ] , device=torch_device , )
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , expected_slice , atol=1e-4 ) )
@slow
def __a ( self : Any ):
'''simple docstring'''
UpperCAmelCase__ : str = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640" )
UpperCAmelCase__ : Any = model.to(snake_case__ )
UpperCAmelCase__ : Dict = BeitImageProcessor(do_resize=snake_case__ , size=6_4_0 , do_center_crop=snake_case__ )
UpperCAmelCase__ : Tuple = load_dataset("hf-internal-testing/fixtures_ade20k" , split="test" )
UpperCAmelCase__ : Optional[int] = Image.open(ds[0]["file"] )
UpperCAmelCase__ : Optional[int] = image_processor(images=snake_case__ , return_tensors="pt" ).to(snake_case__ )
# forward pass
with torch.no_grad():
UpperCAmelCase__ : Optional[int] = model(**snake_case__ )
UpperCAmelCase__ : int = outputs.logits.detach().cpu()
UpperCAmelCase__ : str = image_processor.post_process_semantic_segmentation(outputs=snake_case__ , target_sizes=[(5_0_0, 3_0_0)] )
UpperCAmelCase__ : List[Any] = torch.Size((5_0_0, 3_0_0) )
self.assertEqual(segmentation[0].shape , snake_case__ )
UpperCAmelCase__ : Any = image_processor.post_process_semantic_segmentation(outputs=snake_case__ )
UpperCAmelCase__ : int = torch.Size((1_6_0, 1_6_0) )
self.assertEqual(segmentation[0].shape , snake_case__ )
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_mluke"] = ["""MLukeTokenizer"""]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mluke import MLukeTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
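    # With the lazy module installed in sys.modules, `from transformers import
    # MLukeTokenizer` defers the sentencepiece-dependent import until first access
    # (assumption: standard _LazyModule behavior, defined outside this file).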
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class lowerCAmelCase__ ( metaclass=DummyObject ):
    _backends = ["torch", "scipy"]
def __init__( self : Any , *snake_case__ : str , **snake_case__ : Optional[int] ):
'''simple docstring'''
requires_backends(self , ["torch", "scipy"] )
@classmethod
def __a ( cls : Union[str, Any] , *snake_case__ : Optional[int] , **snake_case__ : Tuple ):
'''simple docstring'''
requires_backends(cls , ["torch", "scipy"] )
@classmethod
def __a ( cls : Union[str, Any] , *snake_case__ : Optional[int] , **snake_case__ : Dict ):
'''simple docstring'''
requires_backends(cls , ["torch", "scipy"] )
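# These dummies let `import diffusers` succeed when torch/scipy are missing;
# any attempt to instantiate the class or call its classmethods raises an
# informative error through requires_backends instead of an opaque ImportError.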
"""simple docstring"""
import os
import re
import sys
import traceback
import warnings
from pathlib import Path
from typing import Dict, Optional, Union
from uuid import uuid4
from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami
from huggingface_hub.file_download import REGEX_COMMIT_HASH
from huggingface_hub.utils import (
EntryNotFoundError,
RepositoryNotFoundError,
RevisionNotFoundError,
is_jinja_available,
)
from packaging import version
from requests import HTTPError
from .. import __version__
from .constants import (
DEPRECATED_REVISION_ARGS,
DIFFUSERS_CACHE,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
SAFETENSORS_WEIGHTS_NAME,
WEIGHTS_NAME,
)
from .import_utils import (
ENV_VARS_TRUE_VALUES,
_flax_version,
_jax_version,
_onnxruntime_version,
_torch_version,
is_flax_available,
is_onnx_available,
is_torch_available,
)
from .logging import get_logger
logger = get_logger(__name__)
MODEL_CARD_TEMPLATE_PATH = Path(__file__).parent / """model_card_template.md"""
SESSION_ID = uuid4().hex
HF_HUB_OFFLINE = os.getenv("""HF_HUB_OFFLINE""", """""").upper() in ENV_VARS_TRUE_VALUES
DISABLE_TELEMETRY = os.getenv("""DISABLE_TELEMETRY""", """""").upper() in ENV_VARS_TRUE_VALUES
HUGGINGFACE_CO_TELEMETRY = HUGGINGFACE_CO_RESOLVE_ENDPOINT + """/api/telemetry/"""
def http_user_agent( user_agent : Union[Dict, str, None] = None )-> str:
    '''simple docstring'''
    ua = f'diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}'
if DISABLE_TELEMETRY or HF_HUB_OFFLINE:
return ua + "; telemetry/off"
if is_torch_available():
ua += f'; torch/{_torch_version}'
if is_flax_available():
ua += f'; jax/{_jax_version}'
ua += f'; flax/{_flax_version}'
if is_onnx_available():
ua += f'; onnxruntime/{_onnxruntime_version}'
# CI will set this value to True
if os.environ.get("DIFFUSERS_IS_CI" , "" ).upper() in ENV_VARS_TRUE_VALUES:
ua += "; is_ci/true"
    if isinstance(user_agent , dict ):
        ua += "; " + "; ".join(f'{k}/{v}' for k, v in user_agent.items() )
    elif isinstance(user_agent , str ):
        ua += "; " + user_agent
return ua
def get_full_repo_name( model_id : str , organization : Optional[str] = None , token : Optional[str] = None )-> str:
    '''simple docstring'''
    if token is None:
        token = HfFolder.get_token()
    if organization is None:
        username = whoami(token )["name"]
        return f'{username}/{model_id}'
    else:
        return f'{organization}/{model_id}'
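# Illustrative call (hypothetical username resolved via `whoami`):
# get_full_repo_name("my-model") -> "<username>/my-model"
# get_full_repo_name("my-model", organization="my-org") -> "my-org/my-model"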
def create_model_card( args , model_name )-> None:
    '''simple docstring'''
    if not is_jinja_available():
        raise ValueError(
            "Modelcard rendering is based on Jinja templates."
            " Please make sure to have `jinja` installed before using `create_model_card`."
            " To install it, please run `pip install Jinja2`." )
    if hasattr(args , "local_rank" ) and args.local_rank not in [-1, 0]:
        return
    hub_token = args.hub_token if hasattr(args , "hub_token" ) else None
    repo_name = get_full_repo_name(model_name , token=hub_token )
    model_card = ModelCard.from_template(
        card_data=ModelCardData( # Card metadata object that will be converted to YAML block
            language="en" , license="apache-2.0" , library_name="diffusers" , tags=[] , datasets=args.dataset_name , metrics=[] , ) , template_path=MODEL_CARD_TEMPLATE_PATH , model_name=model_name , repo_name=repo_name , dataset_name=args.dataset_name if hasattr(args , "dataset_name" ) else None , learning_rate=args.learning_rate , train_batch_size=args.train_batch_size , eval_batch_size=args.eval_batch_size , gradient_accumulation_steps=(
            args.gradient_accumulation_steps if hasattr(args , "gradient_accumulation_steps" ) else None
        ) , adam_beta1=args.adam_beta1 if hasattr(args , "adam_beta1" ) else None , adam_beta2=args.adam_beta2 if hasattr(args , "adam_beta2" ) else None , adam_weight_decay=args.adam_weight_decay if hasattr(args , "adam_weight_decay" ) else None , adam_epsilon=args.adam_epsilon if hasattr(args , "adam_epsilon" ) else None , lr_scheduler=args.lr_scheduler if hasattr(args , "lr_scheduler" ) else None , lr_warmup_steps=args.lr_warmup_steps if hasattr(args , "lr_warmup_steps" ) else None , ema_inv_gamma=args.ema_inv_gamma if hasattr(args , "ema_inv_gamma" ) else None , ema_power=args.ema_power if hasattr(args , "ema_power" ) else None , ema_max_decay=args.ema_max_decay if hasattr(args , "ema_max_decay" ) else None , mixed_precision=args.mixed_precision , )
    card_path = os.path.join(args.output_dir , "README.md" )
    model_card.save(card_path )
def extract_commit_hash( resolved_file : Optional[str] , commit_hash : Optional[str] = None )-> Optional[str]:
    '''simple docstring'''
    if resolved_file is None or commit_hash is not None:
        return commit_hash
    resolved_file = str(Path(resolved_file ).as_posix() )
    search = re.search(r"snapshots/([^/]+)/" , resolved_file )
    if search is None:
        return None
    commit_hash = search.groups()[0]
    return commit_hash if REGEX_COMMIT_HASH.match(commit_hash ) else None
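# Illustrative: a resolved path like ".../snapshots/<commit-sha>/model.bin" yields
# "<commit-sha>" when it matches REGEX_COMMIT_HASH; anything else returns None.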
# Old default cache path, potentially to be migrated.
# This logic was more or less taken from `transformers`, with the following differences:
# - Diffusers doesn't use custom environment variables to specify the cache path.
# - There is no need to migrate the cache format, just move the files to the new location.
hf_cache_home = os.path.expanduser(
    os.getenv("""HF_HOME""", os.path.join(os.getenv("""XDG_CACHE_HOME""", """~/.cache"""), """huggingface"""))
)
old_diffusers_cache = os.path.join(hf_cache_home, """diffusers""")
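# On a default setup this resolves to ~/.cache/huggingface and
# ~/.cache/huggingface/diffusers respectively (HF_HOME/XDG_CACHE_HOME override it).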
def move_cache( old_cache_dir : Optional[str] = None , new_cache_dir : Optional[str] = None )-> None:
    '''simple docstring'''
    if new_cache_dir is None:
        new_cache_dir = DIFFUSERS_CACHE
    if old_cache_dir is None:
        old_cache_dir = old_diffusers_cache
    old_cache_dir = Path(old_cache_dir ).expanduser()
    new_cache_dir = Path(new_cache_dir ).expanduser()
    for old_blob_path in old_cache_dir.glob("**/blobs/*" ):
        if old_blob_path.is_file() and not old_blob_path.is_symlink():
            new_blob_path = new_cache_dir / old_blob_path.relative_to(old_cache_dir )
            new_blob_path.parent.mkdir(parents=True , exist_ok=True )
            os.replace(old_blob_path , new_blob_path )
            try:
                os.symlink(old_blob_path , new_blob_path )
except OSError:
logger.warning(
"Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded." )
# At this point, old_cache_dir contains symlinks to the new cache (it can still be used).
cache_version_file = os.path.join(DIFFUSERS_CACHE, """version_diffusers_cache.txt""")
if not os.path.isfile(cache_version_file):
    cache_version = 0
else:
    with open(cache_version_file) as f:
        try:
            cache_version = int(f.read())
        except ValueError:
            cache_version = 0
if cache_version < 1:
    old_cache_is_not_empty = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0
if old_cache_is_not_empty:
logger.warning(
"""The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your """
"""existing cached models. This is a one-time operation, you can interrupt it or run it """
"""later by calling `diffusers.utils.hub_utils.move_cache()`."""
)
try:
move_cache()
except Exception as e:
        trace = """\n""".join(traceback.format_tb(e.__traceback__))
logger.error(
F"""There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease """
"""file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole """
"""message and we will do our best to help."""
)
if cache_version < 1:
try:
os.makedirs(DIFFUSERS_CACHE, exist_ok=True)
with open(cache_version_file, """w""") as f:
f.write("""1""")
except Exception:
logger.warning(
F"""There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure """
"""the directory exists and can be written to."""
)
def _add_variant( weights_name : str , variant : Optional[str] = None )-> str:
    '''simple docstring'''
    if variant is not None:
        splits = weights_name.split("." )
        splits = splits[:-1] + [variant] + splits[-1:]
        weights_name = ".".join(splits )
    return weights_name
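# e.g. _add_variant("diffusion_pytorch_model.bin", "fp16")
# returns "diffusion_pytorch_model.fp16.bin": the variant is spliced in just
# before the file extension.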
def _get_model_file( pretrained_model_name_or_path , *, weights_name , subfolder , cache_dir , force_download , proxies , resume_download , local_files_only , use_auth_token , user_agent , revision , commit_hash=None , )-> str:
    '''simple docstring'''
    pretrained_model_name_or_path = str(pretrained_model_name_or_path )
    if os.path.isfile(pretrained_model_name_or_path ):
        return pretrained_model_name_or_path
    elif os.path.isdir(pretrained_model_name_or_path ):
        if os.path.isfile(os.path.join(pretrained_model_name_or_path , weights_name ) ):
            # Load from a PyTorch checkpoint
            model_file = os.path.join(pretrained_model_name_or_path , weights_name )
            return model_file
        elif subfolder is not None and os.path.isfile(
            os.path.join(pretrained_model_name_or_path , subfolder , weights_name ) ):
            model_file = os.path.join(pretrained_model_name_or_path , subfolder , weights_name )
            return model_file
else:
raise EnvironmentError(
f'Error no file named {weights_name} found in directory {pretrained_model_name_or_path}.' )
else:
# 1. First check if deprecated way of loading from branches is used
if (
revision in DEPRECATED_REVISION_ARGS
and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME)
and version.parse(version.parse(snake_case ).base_version ) >= version.parse("0.20.0" )
):
try:
                model_file = hf_hub_download(
                    pretrained_model_name_or_path , filename=_add_variant(weights_name , revision ) , cache_dir=cache_dir , force_download=force_download , proxies=proxies , resume_download=resume_download , local_files_only=local_files_only , use_auth_token=use_auth_token , user_agent=user_agent , subfolder=subfolder , revision=revision or commit_hash , )
                warnings.warn(
                    f'Loading the variant {revision} from {pretrained_model_name_or_path} via `revision=\'{revision}\'` is deprecated. Loading instead from `revision=\'main\'` with `variant={revision}`. Loading model variants via `revision=\'{revision}\'` will be removed in diffusers v1. Please use `variant=\'{revision}\'` instead.' , FutureWarning , )
                return model_file
            except: # noqa: E722
                warnings.warn(
                    f'You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision=\'{revision}\'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant=\'{revision}\'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(weights_name , revision )} file in the \'main\' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title \'{pretrained_model_name_or_path} is missing {_add_variant(weights_name , revision )}\' so that the correct variant file can be added.' , FutureWarning , )
try:
# 2. Load model file as usual
            model_file = hf_hub_download(
                pretrained_model_name_or_path , filename=weights_name , cache_dir=cache_dir , force_download=force_download , proxies=proxies , resume_download=resume_download , local_files_only=local_files_only , use_auth_token=use_auth_token , user_agent=user_agent , subfolder=subfolder , revision=revision or commit_hash , )
return model_file
except RepositoryNotFoundError:
raise EnvironmentError(
f'{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier '
"listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a "
"token having permission to this repo with `use_auth_token` or log in with `huggingface-cli "
"login`." )
except RevisionNotFoundError:
raise EnvironmentError(
f'{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for '
"this model name. Check the model page at "
f'\'https://huggingface.co/{pretrained_model_name_or_path}\' for available revisions.' )
except EntryNotFoundError:
raise EnvironmentError(
f'{pretrained_model_name_or_path} does not appear to have a file named {weights_name}.' )
except HTTPError as err:
raise EnvironmentError(
f'There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}' )
except ValueError:
raise EnvironmentError(
f'We couldn\'t connect to \'{HUGGINGFACE_CO_RESOLVE_ENDPOINT}\' to load this model, couldn\'t find it'
f' in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a'
f' directory containing a file named {weights_name} or'
" \nCheckout your internet connection or see how to run the library in"
" offline mode at 'https://huggingface.co/docs/diffusers/installation#offline-mode'." )
except EnvironmentError:
raise EnvironmentError(
f'Can\'t load the model for \'{pretrained_model_name_or_path}\'. If you were trying to load it from '
"'https://huggingface.co/models', make sure you don't have a local directory with the same name. "
f'Otherwise, make sure \'{pretrained_model_name_or_path}\' is the correct path to a directory '
f'containing a file named {weights_name}' )
"""simple docstring"""
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class DDPMSchedulerState:
    common: CommonSchedulerState
    # setable values
    init_noise_sigma: jnp.ndarray
    timesteps: jnp.ndarray
    num_inference_steps: Optional[int] = None
@classmethod
    def create( cls , common : CommonSchedulerState , init_noise_sigma : jnp.ndarray , timesteps : jnp.ndarray ):
        '''simple docstring'''
        return cls(common=common , init_noise_sigma=init_noise_sigma , timesteps=timesteps )
@dataclass
class FlaxDDPMSchedulerOutput( FlaxSchedulerOutput ):
    state: DDPMSchedulerState
class FlaxDDPMScheduler( FlaxSchedulerMixin , ConfigMixin ):
    _compatibles = [e.name for e in FlaxKarrasDiffusionSchedulers]
    dtype: jnp.dtype
@property
    def has_state( self ):
'''simple docstring'''
return True
@register_to_config
    def __init__( self , num_train_timesteps : int = 1_0_0_0 , beta_start : float = 0.0001 , beta_end : float = 0.02 , beta_schedule : str = "linear" , trained_betas : Optional[jnp.ndarray] = None , variance_type : str = "fixed_small" , clip_sample : bool = True , prediction_type : str = "epsilon" , dtype : jnp.dtype = jnp.float32 , ):
        '''simple docstring'''
        self.dtype = dtype
    def create_state( self , common : Optional[CommonSchedulerState] = None ):
        '''simple docstring'''
        if common is None:
            common = CommonSchedulerState.create(self )
        # standard deviation of the initial noise distribution
        init_noise_sigma = jnp.array(1.0 , dtype=self.dtype )
        timesteps = jnp.arange(0 , self.config.num_train_timesteps ).round()[::-1]
        return DDPMSchedulerState.create(
            common=common , init_noise_sigma=init_noise_sigma , timesteps=timesteps , )
    def scale_model_input( self , state : DDPMSchedulerState , sample : jnp.ndarray , timestep : Optional[int] = None ):
        '''simple docstring'''
        return sample
    def set_timesteps( self , state : DDPMSchedulerState , num_inference_steps : int , shape : Tuple = () ):
        '''simple docstring'''
        step_ratio = self.config.num_train_timesteps // num_inference_steps
        # creates integer timesteps by multiplying by ratio
        # rounding to avoid issues when num_inference_step is power of 3
        timesteps = (jnp.arange(0 , num_inference_steps ) * step_ratio).round()[::-1]
        return state.replace(
            num_inference_steps=num_inference_steps , timesteps=timesteps , )
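    # e.g. num_train_timesteps=1000 with num_inference_steps=50 gives step_ratio=20
    # and a schedule of [980, 960, ..., 20, 0].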
    def _get_variance( self , state : DDPMSchedulerState , t : int , predicted_variance=None , variance_type=None ):
        '''simple docstring'''
        alpha_prod_t = state.common.alphas_cumprod[t]
        alpha_prod_t_prev = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
        # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
        # and sample from it to get previous sample
        # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
        variance = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]
        if variance_type is None:
            variance_type = self.config.variance_type
        # hacks - were probably added for training stability
        if variance_type == "fixed_small":
            variance = jnp.clip(variance , a_min=1e-20 )
        # for rl-diffuser https://arxiv.org/abs/2205.09991
        elif variance_type == "fixed_small_log":
            variance = jnp.log(jnp.clip(variance , a_min=1e-20 ) )
        elif variance_type == "fixed_large":
            variance = state.common.betas[t]
        elif variance_type == "fixed_large_log":
            # Glide max_log
            variance = jnp.log(state.common.betas[t] )
        elif variance_type == "learned":
            return predicted_variance
        elif variance_type == "learned_range":
            min_log = variance
            max_log = state.common.betas[t]
            frac = (predicted_variance + 1) / 2
            variance = frac * max_log + (1 - frac) * min_log
return variance
    def step( self , state : DDPMSchedulerState , model_output : jnp.ndarray , timestep : int , sample : jnp.ndarray , key : Optional[jax.random.KeyArray] = None , return_dict : bool = True , ):
        '''simple docstring'''
        t = timestep
        if key is None:
            key = jax.random.PRNGKey(0 )
        if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
            model_output, predicted_variance = jnp.split(model_output , sample.shape[1] , axis=1 )
        else:
            predicted_variance = None
        # 1. compute alphas, betas
        alpha_prod_t = state.common.alphas_cumprod[t]
        alpha_prod_t_prev = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev
        # 2. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        elif self.config.prediction_type == "v_prediction":
            pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
        else:
            raise ValueError(
                f'prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` '
                " or `v_prediction` for the FlaxDDPMScheduler." )
        # 3. Clip "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = jnp.clip(pred_original_sample , -1 , 1 )
        # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
        current_sample_coeff = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
        # 5. Compute predicted previous sample µ_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
        # 6. Add noise
        def random_variance():
            split_key = jax.random.split(key , num=1 )
            noise = jax.random.normal(split_key , shape=model_output.shape , dtype=self.dtype )
            return (self._get_variance(state , t , predicted_variance=predicted_variance ) ** 0.5) * noise
        variance = jnp.where(t > 0 , random_variance() , jnp.zeros(model_output.shape , dtype=self.dtype ) )
        pred_prev_sample = pred_prev_sample + variance
        if not return_dict:
            return (pred_prev_sample, state)
        return FlaxDDPMSchedulerOutput(prev_sample=pred_prev_sample , state=state )
    def add_noise( self , state : DDPMSchedulerState , original_samples : jnp.ndarray , noise : jnp.ndarray , timesteps : jnp.ndarray , ):
        '''simple docstring'''
        return add_noise_common(state.common , original_samples , noise , timesteps )
    def get_velocity( self , state : DDPMSchedulerState , sample : jnp.ndarray , noise : jnp.ndarray , timesteps : jnp.ndarray , ):
        '''simple docstring'''
        return get_velocity_common(state.common , sample , noise , timesteps )
def __len__( self : Union[str, Any] ):
'''simple docstring'''
return self.config.num_train_timesteps
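# A minimal usage sketch (assumes the Flax scheduler API defined above;
# `unet_output` and `sample` are placeholders for a model prediction and the
# current latents):
# scheduler = FlaxDDPMScheduler(num_train_timesteps=1000)
# state = scheduler.create_state()
# state = scheduler.set_timesteps(state, num_inference_steps=50)
# for t in state.timesteps:
#     out = scheduler.step(state, unet_output, t, sample, key=jax.random.PRNGKey(0))
#     sample, state = out.prev_sample, out.state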
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import tensorflow as tf
from transformers import AutoTokenizer, TFAutoModelForSeqaSeqLM
@require_tf
@require_sentencepiece
@require_tokenizers
class TFMT5ModelIntegrationTest( unittest.TestCase ):
@slow
    def test_small_integration_test( self ):
        '''simple docstring'''
        model = TFAutoModelForSeqaSeqLM.from_pretrained("google/mt5-small" )
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small" )
        input_ids = tokenizer("Hello there" , return_tensors="tf" ).input_ids
        labels = tokenizer("Hi I am" , return_tensors="tf" ).input_ids
        loss = model(input_ids , labels=labels ).loss
        mtf_score = -tf.math.reduce_mean(loss ).numpy()
        EXPECTED_SCORE = -21.22_8168
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 2e-4 )
"""simple docstring"""
import inspect
import jax
import jax.lax as lax
import jax.numpy as jnp
from ..utils import add_start_docstrings
from ..utils.logging import get_logger
logger = get_logger(__name__)
LOGITS_PROCESSOR_INPUTS_DOCSTRING = r"""
Args:
input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`):
Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam
search or log softmax for each vocabulary token when using beam search
kwargs (`Dict[str, Any]`, *optional*):
Additional logits processor specific kwargs.
Return:
`jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores.
"""
class FlaxLogitsProcessor:
    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING )
    def __call__( self , input_ids : jnp.ndarray , scores : jnp.ndarray ):
        '''simple docstring'''
        raise NotImplementedError(
            f'{self.__class__} is an abstract class. Only classes inheriting this class can be called.' )
class FlaxLogitsWarper:
    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING )
    def __call__( self , input_ids : jnp.ndarray , scores : jnp.ndarray ):
        '''simple docstring'''
        raise NotImplementedError(
            f'{self.__class__} is an abstract class. Only classes inheriting this class can be called.' )
class FlaxLogitsProcessorList( list ):
    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING )
    def __call__( self , input_ids : jnp.ndarray , scores : jnp.ndarray , cur_len : int , **kwargs ):
        '''simple docstring'''
        for processor in self:
            function_args = inspect.signature(processor.__call__ ).parameters
            if len(function_args ) > 3:
                if not all(arg in kwargs for arg in list(function_args.keys() )[2:] ):
                    raise ValueError(
                        f'Make sure that all the required parameters: {list(function_args.keys() )} for '
                        f'{processor.__class__} are passed to the logits processor.' )
                scores = processor(input_ids , scores , cur_len , **kwargs )
            else:
                scores = processor(input_ids , scores , cur_len )
        return scores
class FlaxTemperatureLogitsWarper( FlaxLogitsWarper ):
    def __init__( self , temperature : float ):
        '''simple docstring'''
        if not isinstance(temperature , float ) or not (temperature > 0):
            raise ValueError(f'`temperature` has to be a strictly positive float, but is {temperature}' )
        self.temperature = temperature
    def __call__( self , input_ids : jnp.ndarray , scores : jnp.ndarray , cur_len : int ):
        '''simple docstring'''
        scores = scores / self.temperature
        return scores
class FlaxTopPLogitsWarper( FlaxLogitsWarper ):
    def __init__( self , top_p : float , filter_value : float = -float("Inf" ) , min_tokens_to_keep : int = 1 ):
        '''simple docstring'''
        if not isinstance(top_p , float ) or (top_p < 0 or top_p > 1.0):
            raise ValueError(f'`top_p` has to be a float > 0 and < 1, but is {top_p}' )
        if not isinstance(min_tokens_to_keep , int ) or (min_tokens_to_keep < 1):
            raise ValueError(f'`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}' )
        self.top_p = top_p
        self.filter_value = filter_value
        self.min_tokens_to_keep = min_tokens_to_keep
    def __call__( self , input_ids : jnp.ndarray , scores : jnp.ndarray , cur_len : int ):
        '''simple docstring'''
        topk_scores, topk_indices = lax.top_k(scores , scores.shape[-1] )
        mask_scores = jnp.full_like(scores , self.filter_value )
        cumulative_probs = jax.nn.softmax(topk_scores , axis=-1 ).cumsum(axis=-1 )
        score_mask = cumulative_probs < self.top_p
        # include the token that is higher than top_p as well
        score_mask = jnp.roll(score_mask , 1 )
        score_mask |= score_mask.at[:, 0].set(True )
        # min tokens to keep
        score_mask = score_mask.at[:, : self.min_tokens_to_keep].set(True )
        topk_next_scores = jnp.where(score_mask , topk_scores , mask_scores )
        next_scores = jax.lax.sort_key_val(topk_indices , topk_next_scores )[-1]
return next_scores
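# A minimal sketch (not part of the original module; simplified relative to
# the warper above, which additionally rolls the mask so the token crossing
# the threshold is kept) of nucleus/top-p filtering: keep the smallest set of
# tokens whose cumulative probability reaches `top_p`, mask everything else.
def _demo_top_p_filtering(top_p : float = 0.9 ):
    import jax
    import jax.numpy as jnp
    from jax import lax

    scores = jnp.array([4.0, 3.0, 2.0, 1.0] )
    sorted_scores , sorted_indices = lax.top_k(scores , scores.shape[-1] )
    cumulative_probs = jax.nn.softmax(sorted_scores , axis=-1 ).cumsum(axis=-1 )
    keep = cumulative_probs < top_p
    keep = keep.at[0].set(True )  # always keep the single most likely token
    return sorted_indices , jnp.where(keep , sorted_scores , -jnp.inf )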
class FlaxTopKLogitsWarper(FlaxLogitsWarper ):
    def __init__( self , top_k : int , filter_value : float = -float("Inf" ) , min_tokens_to_keep : int = 1 ):
        '''simple docstring'''
        if not isinstance(top_k , int ) or top_k <= 0:
            raise ValueError(f'`top_k` has to be a strictly positive integer, but is {top_k}' )
        self.top_k = max(top_k , min_tokens_to_keep )
        self.filter_value = filter_value

    def __call__( self , input_ids : jnp.ndarray , scores : jnp.ndarray , cur_len : int ):
        '''simple docstring'''
        batch_size , vocab_size = scores.shape
        next_scores_flat = jnp.full(batch_size * vocab_size , self.filter_value )
        topk = min(self.top_k , scores.shape[-1] )  # Safety check
        topk_scores , topk_indices = lax.top_k(scores , topk )
        shift = jnp.broadcast_to((jnp.arange(batch_size ) * vocab_size)[:, None] , (batch_size, topk) ).flatten()
        topk_scores_flat = topk_scores.flatten()
        topk_indices_flat = topk_indices.flatten() + shift
        next_scores_flat = next_scores_flat.at[topk_indices_flat].set(topk_scores_flat )
        next_scores = next_scores_flat.reshape(batch_size , vocab_size )
        return next_scores
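# A minimal sketch (not part of the original module) of top-k filtering on a
# batched score matrix: entries outside the k best per row become -inf so
# they can never be sampled.
def _demo_top_k_filtering(k : int = 2 ):
    import jax.numpy as jnp
    from jax import lax

    scores = jnp.array([[0.3, 2.1, -1.0, 0.7]] )
    topk_scores , topk_indices = lax.top_k(scores , k )
    filtered = jnp.full_like(scores , -jnp.inf )
    rows = jnp.arange(scores.shape[0] )[:, None]
    return filtered.at[rows, topk_indices].set(topk_scores )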
class FlaxForcedBOSTokenLogitsProcessor(FlaxLogitsProcessor ):
    def __init__( self , bos_token_id : int ):
        '''simple docstring'''
        self.bos_token_id = bos_token_id

    def __call__( self , input_ids : jnp.ndarray , scores : jnp.ndarray , cur_len : int ):
        '''simple docstring'''
        new_scores = jnp.full(scores.shape , -float("inf" ) )
        apply_penalty = 1 - jnp.bool_(cur_len - 1 )
        scores = jnp.where(apply_penalty , new_scores.at[:, self.bos_token_id].set(0 ) , scores )
        return scores
class FlaxForcedEOSTokenLogitsProcessor(FlaxLogitsProcessor ):
    def __init__( self , max_length : int , eos_token_id : int ):
        '''simple docstring'''
        self.max_length = max_length
        self.eos_token_id = eos_token_id

    def __call__( self , input_ids : jnp.ndarray , scores : jnp.ndarray , cur_len : int ):
        '''simple docstring'''
        new_scores = jnp.full(scores.shape , -float("inf" ) )
        apply_penalty = 1 - jnp.bool_(cur_len - self.max_length + 1 )
        scores = jnp.where(apply_penalty , new_scores.at[:, self.eos_token_id].set(0 ) , scores )
        return scores
class FlaxMinLengthLogitsProcessor(FlaxLogitsProcessor ):
    def __init__( self , min_length : int , eos_token_id : int ):
        '''simple docstring'''
        if not isinstance(min_length , int ) or min_length < 0:
            raise ValueError(f'`min_length` has to be a positive integer, but is {min_length}' )
        if not isinstance(eos_token_id , int ) or eos_token_id < 0:
            raise ValueError(f'`eos_token_id` has to be a positive integer, but is {eos_token_id}' )
        self.min_length = min_length
        self.eos_token_id = eos_token_id

    def __call__( self , input_ids : jnp.ndarray , scores : jnp.ndarray , cur_len : int ):
        '''simple docstring'''
        # create boolean flag to decide if min length penalty should be applied
        apply_penalty = 1 - jnp.clip(cur_len - self.min_length , 0 , 1 )
        scores = jnp.where(apply_penalty , scores.at[:, self.eos_token_id].set(-float("inf" ) ) , scores )
        return scores
class FlaxSuppressTokensAtBeginLogitsProcessor(FlaxLogitsProcessor ):
    def __init__( self , begin_suppress_tokens , begin_index : int ):
        '''simple docstring'''
        self.begin_suppress_tokens = list(begin_suppress_tokens )
        self.begin_index = begin_index

    def __call__( self , input_ids : jnp.ndarray , scores : jnp.ndarray , cur_len : int ):
        '''simple docstring'''
        apply_penalty = 1 - jnp.bool_(cur_len - self.begin_index )
        scores = jnp.where(apply_penalty , scores.at[:, self.begin_suppress_tokens].set(-float("inf" ) ) , scores )
        return scores
class FlaxSuppressTokensLogitsProcessor(FlaxLogitsProcessor ):
    def __init__( self , suppress_tokens : list ):
        '''simple docstring'''
        self.suppress_tokens = list(suppress_tokens )

    def __call__( self , input_ids : jnp.ndarray , scores : jnp.ndarray , cur_len : int ):
        '''simple docstring'''
        scores = scores.at[..., self.suppress_tokens].set(-float("inf" ) )
        return scores
class FlaxForceTokensLogitsProcessor(FlaxLogitsProcessor ):
    def __init__( self , force_token_map ):
        '''simple docstring'''
        force_token_map = dict(force_token_map )
        # Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the
        # index of the array corresponds to the index of the token to be forced, for XLA compatibility.
        # Indexes without forced tokens will have a negative value.
        force_token_array = jnp.ones((max(force_token_map.keys() ) + 1) , dtype=jnp.int32 ) * -1
        for index, token in force_token_map.items():
            if token is not None:
                force_token_array = force_token_array.at[index].set(token )
        self.force_token_array = jnp.int32(force_token_array )

    def __call__( self , input_ids : jnp.ndarray , scores : jnp.ndarray , cur_len : int ):
        '''simple docstring'''
        def _force_token(generation_idx ):
            batch_size = scores.shape[0]
            current_token = self.force_token_array[generation_idx]
            new_scores = jnp.ones_like(scores , dtype=scores.dtype ) * -float("inf" )
            updates = jnp.zeros((batch_size, 1) , dtype=scores.dtype )
            new_scores = lax.dynamic_update_slice(new_scores , updates , (0, current_token) )
            return new_scores
        scores = lax.cond(
            cur_len >= self.force_token_array.shape[0] , lambda: scores , lambda: lax.cond(
                self.force_token_array[cur_len] >= 0 , lambda: _force_token(cur_len ) , lambda: scores , ) , )
        return scores
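# A short worked example (not part of the original module; the token ids are
# hypothetical) of the {index: token} -> array conversion performed in the
# constructor above: positions without a forced token are filled with -1.
def _demo_force_token_array():
    import jax.numpy as jnp

    force_token_map = {1: 50359, 2: 50363}
    force_token_array = jnp.ones(max(force_token_map.keys() ) + 1 , dtype=jnp.int32 ) * -1
    for index, token in force_token_map.items():
        force_token_array = force_token_array.at[index].set(token )
    return force_token_array  # Array([-1, 50359, 50363], dtype=int32)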
class FlaxWhisperTimeStampLogitsProcessor(FlaxLogitsProcessor ):
    def __init__( self , generate_config , model_config , decoder_input_length ):
        '''simple docstring'''
        self.eos_token_id = generate_config.eos_token_id
        self.no_timestamps_token_id = generate_config.no_timestamps_token_id
        self.timestamp_begin = generate_config.no_timestamps_token_id + 1
        self.begin_index = decoder_input_length + 1
        if generate_config.is_multilingual:
            # room for language token and task token
            self.begin_index += 2
        if hasattr(generate_config , "max_initial_timestamp_index" ):
            self.max_initial_timestamp_index = generate_config.max_initial_timestamp_index
        else:
            self.max_initial_timestamp_index = model_config.vocab_size
        if self.max_initial_timestamp_index is None:
            self.max_initial_timestamp_index = model_config.vocab_size

    def __call__( self , input_ids , scores , cur_len ):
        '''simple docstring'''
        # suppress <|notimestamps|> which is handled by without_timestamps
        scores = scores.at[:, self.no_timestamps_token_id].set(-float("inf" ) )

        # timestamps have to appear in pairs: directly after a lone timestamp
        # only another timestamp (or EOS) may follow; after a pair, only text
        def handle_pairs(input_ids_k , scores_k ):
            last_was_timestamp = jnp.where((cur_len - self.begin_index) >= 1 , True , False )
            last_was_timestamp = jnp.where(
                input_ids_k[cur_len - 1] >= self.timestamp_begin , True and last_was_timestamp , False , )
            penultimate_was_timestamp = jnp.where((cur_len - self.begin_index) < 2 , True , False )
            penultimate_was_timestamp = jnp.where(
                input_ids_k[cur_len - 2] >= self.timestamp_begin , True , penultimate_was_timestamp , )
            return jnp.where(
                last_was_timestamp , jnp.where(
                    penultimate_was_timestamp > 0 , scores_k.at[self.timestamp_begin :].set(-float("inf" ) ) , scores_k.at[: self.eos_token_id].set(-float("inf" ) ) , ) , scores_k , )
        scores = jax.vmap(handle_pairs )(input_ids , scores )
        apply_max_initial_timestamp = jnp.where(cur_len == self.begin_index , True , False )
        apply_max_initial_timestamp = jnp.where(
            self.max_initial_timestamp_index is not None , True and apply_max_initial_timestamp , False , )
        last_allowed = self.timestamp_begin + self.max_initial_timestamp_index
        scores = jnp.where(
            apply_max_initial_timestamp , scores.at[:, last_allowed + 1 :].set(-float("inf" ) ) , scores , )
        # if sum of probability over timestamps is above any other token, sample timestamp
        logprobs = jax.nn.log_softmax(scores , axis=-1 )
        def handle_cumulative_probs(logprobs_k , scores_k ):
            timestamp_logprob = jax.nn.logsumexp(logprobs_k[self.timestamp_begin :] , axis=-1 )
            max_text_token_logprob = jnp.max(logprobs_k[: self.timestamp_begin] )
            return jnp.where(
                timestamp_logprob > max_text_token_logprob , scores_k.at[: self.timestamp_begin].set(-float("inf" ) ) , scores_k , )
        scores = jax.vmap(handle_cumulative_probs )(logprobs , scores )
        return scores
| 298
|
"""simple docstring"""
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class AlbertModelTester:
def __init__( self : Optional[Any] , snake_case__ : Optional[Any] , snake_case__ : Dict=1_3 , snake_case__ : List[str]=7 , snake_case__ : Union[str, Any]=True , snake_case__ : Tuple=True , snake_case__ : Optional[int]=True , snake_case__ : Any=True , snake_case__ : Any=9_9 , snake_case__ : List[Any]=1_6 , snake_case__ : Any=3_6 , snake_case__ : Union[str, Any]=6 , snake_case__ : Tuple=6 , snake_case__ : List[str]=6 , snake_case__ : List[str]=3_7 , snake_case__ : Dict="gelu" , snake_case__ : int=0.1 , snake_case__ : List[Any]=0.1 , snake_case__ : List[str]=5_1_2 , snake_case__ : Dict=1_6 , snake_case__ : str=2 , snake_case__ : Optional[Any]=0.02 , snake_case__ : List[str]=3 , snake_case__ : Any=4 , snake_case__ : int=None , ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = parent
UpperCAmelCase__ : int = batch_size
UpperCAmelCase__ : int = seq_length
UpperCAmelCase__ : List[str] = is_training
UpperCAmelCase__ : Union[str, Any] = use_input_mask
UpperCAmelCase__ : Optional[Any] = use_token_type_ids
UpperCAmelCase__ : Any = use_labels
UpperCAmelCase__ : List[Any] = vocab_size
UpperCAmelCase__ : Any = embedding_size
UpperCAmelCase__ : List[str] = hidden_size
UpperCAmelCase__ : List[Any] = num_hidden_layers
UpperCAmelCase__ : int = num_hidden_groups
UpperCAmelCase__ : Union[str, Any] = num_attention_heads
UpperCAmelCase__ : List[str] = intermediate_size
UpperCAmelCase__ : Optional[Any] = hidden_act
UpperCAmelCase__ : List[Any] = hidden_dropout_prob
UpperCAmelCase__ : Tuple = attention_probs_dropout_prob
UpperCAmelCase__ : str = max_position_embeddings
UpperCAmelCase__ : Any = type_vocab_size
UpperCAmelCase__ : Union[str, Any] = type_sequence_label_size
UpperCAmelCase__ : Union[str, Any] = initializer_range
UpperCAmelCase__ : Tuple = num_labels
UpperCAmelCase__ : List[str] = num_choices
UpperCAmelCase__ : Union[str, Any] = scope
def __a ( self : int ):
'''simple docstring'''
input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
input_mask = None
if self.use_input_mask:
input_mask = random_attention_mask([self.batch_size, self.seq_length] )
token_type_ids = None
if self.use_token_type_ids:
token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
sequence_labels = None
token_labels = None
choice_labels = None
if self.use_labels:
sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
choice_labels = ids_tensor([self.batch_size] , self.num_choices )
config = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __a ( self : Any ):
'''simple docstring'''
return AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , )
def __a ( self : List[Any] , snake_case__ : Optional[Any] , snake_case__ : List[Any] , snake_case__ : Optional[Any] , snake_case__ : Tuple , snake_case__ : List[str] , snake_case__ : List[Any] , snake_case__ : List[str] ):
'''simple docstring'''
UpperCAmelCase__ : str = AlbertModel(config=snake_case__ )
model.to(snake_case__ )
model.eval()
UpperCAmelCase__ : Union[str, Any] = model(snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ )
UpperCAmelCase__ : Optional[Any] = model(snake_case__ , token_type_ids=snake_case__ )
UpperCAmelCase__ : Optional[int] = model(snake_case__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def __a ( self : Dict , snake_case__ : List[Any] , snake_case__ : Union[str, Any] , snake_case__ : List[str] , snake_case__ : int , snake_case__ : List[Any] , snake_case__ : str , snake_case__ : int ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = AlbertForPreTraining(config=snake_case__ )
model.to(snake_case__ )
model.eval()
UpperCAmelCase__ : Dict = model(
snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ , sentence_order_label=snake_case__ , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.sop_logits.shape , (self.batch_size, config.num_labels) )
def __a ( self : Union[str, Any] , snake_case__ : Any , snake_case__ : List[str] , snake_case__ : List[str] , snake_case__ : Union[str, Any] , snake_case__ : List[Any] , snake_case__ : Optional[Any] , snake_case__ : List[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = AlbertForMaskedLM(config=snake_case__ )
model.to(snake_case__ )
model.eval()
UpperCAmelCase__ : Union[str, Any] = model(snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __a ( self : Optional[int] , snake_case__ : Optional[int] , snake_case__ : Optional[Any] , snake_case__ : List[str] , snake_case__ : Any , snake_case__ : Optional[Any] , snake_case__ : List[str] , snake_case__ : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = AlbertForQuestionAnswering(config=snake_case__ )
model.to(snake_case__ )
model.eval()
UpperCAmelCase__ : List[str] = model(
snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , start_positions=snake_case__ , end_positions=snake_case__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __a ( self : Dict , snake_case__ : Optional[Any] , snake_case__ : List[str] , snake_case__ : Optional[int] , snake_case__ : Any , snake_case__ : Any , snake_case__ : List[str] , snake_case__ : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = self.num_labels
UpperCAmelCase__ : int = AlbertForSequenceClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
UpperCAmelCase__ : int = model(snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __a ( self : str , snake_case__ : Optional[Any] , snake_case__ : str , snake_case__ : Optional[Any] , snake_case__ : Union[str, Any] , snake_case__ : Optional[int] , snake_case__ : Dict , snake_case__ : Tuple ):
'''simple docstring'''
UpperCAmelCase__ : str = self.num_labels
UpperCAmelCase__ : Any = AlbertForTokenClassification(config=snake_case__ )
model.to(snake_case__ )
model.eval()
UpperCAmelCase__ : List[str] = model(snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __a ( self : Any , snake_case__ : Optional[Any] , snake_case__ : Dict , snake_case__ : Dict , snake_case__ : int , snake_case__ : List[str] , snake_case__ : Union[str, Any] , snake_case__ : Dict ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = self.num_choices
UpperCAmelCase__ : Optional[Any] = AlbertForMultipleChoice(config=snake_case__ )
model.to(snake_case__ )
model.eval()
UpperCAmelCase__ : int = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCAmelCase__ : Any = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCAmelCase__ : Optional[Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCAmelCase__ : Tuple = model(
snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __a ( self : Union[str, Any] ):
'''simple docstring'''
config_and_inputs = self.prepare_config_and_inputs()
config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels = config_and_inputs
inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class lowerCAmelCase__ ( __magic_name__ , __magic_name__ , unittest.TestCase ):
SCREAMING_SNAKE_CASE_ =(
(
AlbertModel,
AlbertForPreTraining,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertForQuestionAnswering,
)
if is_torch_available()
else ()
)
SCREAMING_SNAKE_CASE_ =(
{
'''feature-extraction''': AlbertModel,
'''fill-mask''': AlbertForMaskedLM,
'''question-answering''': AlbertForQuestionAnswering,
'''text-classification''': AlbertForSequenceClassification,
'''token-classification''': AlbertForTokenClassification,
'''zero-shot''': AlbertForSequenceClassification,
}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE_ =True
def __a ( self : Tuple , snake_case__ : int , snake_case__ : Tuple , snake_case__ : Optional[int]=False ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = super()._prepare_for_class(snake_case__ , snake_case__ , return_labels=snake_case__ )
if return_labels:
if model_class in get_values(snake_case__ ):
UpperCAmelCase__ : List[Any] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=snake_case__ )
UpperCAmelCase__ : int = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=snake_case__ )
return inputs_dict
def __a ( self : Tuple ):
'''simple docstring'''
self.model_tester = AlbertModelTester(self )
self.config_tester = ConfigTester(self , config_class=AlbertConfig , hidden_size=3_7 )
def __a ( self : Dict ):
'''simple docstring'''
self.config_tester.run_common_tests()
def __a ( self : str ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case__ )
def __a ( self : Dict ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*snake_case__ )
def __a ( self : str ):
'''simple docstring'''
UpperCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*snake_case__ )
def __a ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*snake_case__ )
def __a ( self : Dict ):
'''simple docstring'''
UpperCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*snake_case__ )
def __a ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*snake_case__ )
def __a ( self : List[Any] ):
'''simple docstring'''
UpperCAmelCase__ : str = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
UpperCAmelCase__ : Dict = type
self.model_tester.create_and_check_model(*snake_case__ )
@slow
def __a ( self : Union[str, Any] ):
'''simple docstring'''
for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = AlbertModel.from_pretrained(model_name )
self.assertIsNotNone(model )
@require_torch
class lowerCAmelCase__ ( unittest.TestCase ):
@slow
def __a ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = AlbertModel.from_pretrained("albert-base-v2" )
UpperCAmelCase__ : Dict = torch.tensor([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] )
UpperCAmelCase__ : List[str] = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
UpperCAmelCase__ : int = model(snake_case__ , attention_mask=snake_case__ )[0]
UpperCAmelCase__ : Dict = torch.Size((1, 1_1, 7_6_8) )
self.assertEqual(output.shape , snake_case__ )
UpperCAmelCase__ : Union[str, Any] = torch.tensor(
[[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , snake_case__ , atol=1e-4 ) )
| 298
| 1
|
"""simple docstring"""
import argparse
import json
import logging
import os
import shutil
import sys
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.utils import write_basic_config
from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device
from transformers.utils import is_apex_available
logging.basicConfig(level=logging.DEBUG)
_lowerCAmelCase : Optional[int] = logging.getLogger()
def SCREAMING_SNAKE_CASE__ ( )-> Union[str, Any]:
    '''simple docstring'''
    parser = argparse.ArgumentParser()
    parser.add_argument("-f" )
    args = parser.parse_args()
    return args.f
def get_results(output_dir )-> dict:
    '''simple docstring'''
    results = {}
    path = os.path.join(output_dir , "all_results.json" )
    if os.path.exists(path ):
        with open(path , "r" ) as f:
            results = json.load(f )
    else:
        raise ValueError(f'can\'t find {path}' )
    return results
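# A minimal usage sketch (not part of the original test suite) of the
# ``all_results.json`` contract that ``get_results`` reads: every example
# script dumps its final metrics to that file inside its output directory.
def _demo_results_roundtrip(tmp_dir ):
    with open(os.path.join(tmp_dir , "all_results.json" ) , "w" ) as f:
        json.dump({"eval_accuracy": 0.9} , f )  # hypothetical metric
    return get_results(tmp_dir )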
def is_cuda_and_apex_available()-> bool:
    '''simple docstring'''
    is_using_cuda = torch.cuda.is_available() and torch_device == "cuda"
    return is_using_cuda and is_apex_available()
_lowerCAmelCase : Dict = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class lowerCAmelCase__ ( __magic_name__ ):
@classmethod
def __a ( cls : List[str] ):
'''simple docstring'''
# Write Accelerate config, will pick up on CPU, GPU, and multi-GPU
UpperCAmelCase__ : Union[str, Any] = tempfile.mkdtemp()
UpperCAmelCase__ : Optional[int] = os.path.join(cls.tmpdir , "default_config.yml" )
write_basic_config(save_location=cls.configPath )
UpperCAmelCase__ : List[str] = ["accelerate", "launch", "--config_file", cls.configPath]
@classmethod
def __a ( cls : Optional[Any] ):
'''simple docstring'''
shutil.rmtree(cls.tmpdir )
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def __a ( self : str ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = self.get_auto_remove_tmp_dir()
UpperCAmelCase__ : List[str] = f'\n {self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py\n --model_name_or_path distilbert-base-uncased\n --output_dir {tmp_dir}\n --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --learning_rate=1e-4\n --seed=42\n --checkpointing_steps epoch\n --with_tracking\n '.split()
if is_cuda_and_apex_available():
testargs.append("--fp16" )
run_command(self._launch_args + testargs )
UpperCAmelCase__ : int = get_results(snake_case__ )
self.assertGreaterEqual(result["eval_accuracy"] , 0.75 )
self.assertTrue(os.path.exists(os.path.join(snake_case__ , "epoch_0" ) ) )
self.assertTrue(os.path.exists(os.path.join(snake_case__ , "glue_no_trainer" ) ) )
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def __a ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase__ : int = self.get_auto_remove_tmp_dir()
UpperCAmelCase__ : int = f'\n {self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py\n --model_name_or_path distilgpt2\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --block_size 128\n --per_device_train_batch_size 5\n --per_device_eval_batch_size 5\n --num_train_epochs 2\n --output_dir {tmp_dir}\n --checkpointing_steps epoch\n --with_tracking\n '.split()
if torch.cuda.device_count() > 1:
# Skipping because there are not enough batches to train the model + would need a drop_last to work.
return
run_command(self._launch_args + testargs )
UpperCAmelCase__ : Optional[Any] = get_results(snake_case__ )
self.assertLess(result["perplexity"] , 1_0_0 )
self.assertTrue(os.path.exists(os.path.join(snake_case__ , "epoch_0" ) ) )
self.assertTrue(os.path.exists(os.path.join(snake_case__ , "clm_no_trainer" ) ) )
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def __a ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = self.get_auto_remove_tmp_dir()
UpperCAmelCase__ : str = f'\n {self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py\n --model_name_or_path distilroberta-base\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --output_dir {tmp_dir}\n --num_train_epochs=1\n --checkpointing_steps epoch\n --with_tracking\n '.split()
run_command(self._launch_args + testargs )
UpperCAmelCase__ : Dict = get_results(snake_case__ )
self.assertLess(result["perplexity"] , 4_2 )
self.assertTrue(os.path.exists(os.path.join(snake_case__ , "epoch_0" ) ) )
self.assertTrue(os.path.exists(os.path.join(snake_case__ , "mlm_no_trainer" ) ) )
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def __a ( self : Optional[int] ):
'''simple docstring'''
# with so little data distributed training needs more epochs to get the score on par with 0/1 gpu
UpperCAmelCase__ : Dict = 7 if get_gpu_count() > 1 else 2
UpperCAmelCase__ : Dict = self.get_auto_remove_tmp_dir()
UpperCAmelCase__ : Any = f'\n {self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py\n --model_name_or_path bert-base-uncased\n --train_file tests/fixtures/tests_samples/conll/sample.json\n --validation_file tests/fixtures/tests_samples/conll/sample.json\n --output_dir {tmp_dir}\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=2\n --num_train_epochs={epochs}\n --seed 7\n --checkpointing_steps epoch\n --with_tracking\n '.split()
run_command(self._launch_args + testargs )
UpperCAmelCase__ : Dict = get_results(snake_case__ )
self.assertGreaterEqual(result["eval_accuracy"] , 0.75 )
self.assertLess(result["train_loss"] , 0.5 )
self.assertTrue(os.path.exists(os.path.join(snake_case__ , "epoch_0" ) ) )
self.assertTrue(os.path.exists(os.path.join(snake_case__ , "ner_no_trainer" ) ) )
@unittest.skip(reason="Fix me @muellerzr" )
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def __a ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : Dict = self.get_auto_remove_tmp_dir()
UpperCAmelCase__ : List[Any] = f'\n {self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py\n --model_name_or_path bert-base-uncased\n --version_2_with_negative\n --train_file tests/fixtures/tests_samples/SQUAD/sample.json\n --validation_file tests/fixtures/tests_samples/SQUAD/sample.json\n --output_dir {tmp_dir}\n --seed=42\n --max_train_steps=10\n --num_warmup_steps=2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --checkpointing_steps epoch\n --with_tracking\n '.split()
run_command(self._launch_args + testargs )
UpperCAmelCase__ : Tuple = get_results(snake_case__ )
# Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics.
self.assertGreaterEqual(result["eval_f1"] , 2_8 )
self.assertGreaterEqual(result["eval_exact"] , 2_8 )
self.assertTrue(os.path.exists(os.path.join(snake_case__ , "epoch_0" ) ) )
self.assertTrue(os.path.exists(os.path.join(snake_case__ , "qa_no_trainer" ) ) )
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def __a ( self : int ):
'''simple docstring'''
UpperCAmelCase__ : Dict = self.get_auto_remove_tmp_dir()
UpperCAmelCase__ : List[str] = f'\n {self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py\n --model_name_or_path bert-base-uncased\n --train_file tests/fixtures/tests_samples/swag/sample.json\n --validation_file tests/fixtures/tests_samples/swag/sample.json\n --output_dir {tmp_dir}\n --max_train_steps=20\n --num_warmup_steps=2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --with_tracking\n '.split()
run_command(self._launch_args + testargs )
UpperCAmelCase__ : int = get_results(snake_case__ )
self.assertGreaterEqual(result["eval_accuracy"] , 0.8 )
self.assertTrue(os.path.exists(os.path.join(snake_case__ , "swag_no_trainer" ) ) )
@slow
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def __a ( self : Any ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = self.get_auto_remove_tmp_dir()
UpperCAmelCase__ : Dict = f'\n {self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py\n --model_name_or_path t5-small\n --train_file tests/fixtures/tests_samples/xsum/sample.json\n --validation_file tests/fixtures/tests_samples/xsum/sample.json\n --output_dir {tmp_dir}\n --max_train_steps=50\n --num_warmup_steps=8\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --checkpointing_steps epoch\n --with_tracking\n '.split()
run_command(self._launch_args + testargs )
UpperCAmelCase__ : str = get_results(snake_case__ )
self.assertGreaterEqual(result["eval_rouge1"] , 1_0 )
self.assertGreaterEqual(result["eval_rouge2"] , 2 )
self.assertGreaterEqual(result["eval_rougeL"] , 7 )
self.assertGreaterEqual(result["eval_rougeLsum"] , 7 )
self.assertTrue(os.path.exists(os.path.join(snake_case__ , "epoch_0" ) ) )
self.assertTrue(os.path.exists(os.path.join(snake_case__ , "summarization_no_trainer" ) ) )
@slow
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def __a ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : Dict = self.get_auto_remove_tmp_dir()
UpperCAmelCase__ : Tuple = f'\n {self.examples_dir}/pytorch/translation/run_translation_no_trainer.py\n --model_name_or_path sshleifer/student_marian_en_ro_6_1\n --source_lang en\n --target_lang ro\n --train_file tests/fixtures/tests_samples/wmt16/sample.json\n --validation_file tests/fixtures/tests_samples/wmt16/sample.json\n --output_dir {tmp_dir}\n --max_train_steps=50\n --num_warmup_steps=8\n --num_beams=6\n --learning_rate=3e-3\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --source_lang en_XX\n --target_lang ro_RO\n --checkpointing_steps epoch\n --with_tracking\n '.split()
run_command(self._launch_args + testargs )
UpperCAmelCase__ : List[str] = get_results(snake_case__ )
self.assertGreaterEqual(result["eval_bleu"] , 3_0 )
self.assertTrue(os.path.exists(os.path.join(snake_case__ , "epoch_0" ) ) )
self.assertTrue(os.path.exists(os.path.join(snake_case__ , "translation_no_trainer" ) ) )
@slow
def __a ( self : str ):
'''simple docstring'''
UpperCAmelCase__ : str = logging.StreamHandler(sys.stdout )
logger.addHandler(snake_case__ )
UpperCAmelCase__ : Optional[int] = self.get_auto_remove_tmp_dir()
UpperCAmelCase__ : Optional[int] = f'\n {self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py\n --dataset_name huggingface/semantic-segmentation-test-sample\n --output_dir {tmp_dir}\n --max_train_steps=10\n --num_warmup_steps=2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --checkpointing_steps epoch\n '.split()
run_command(self._launch_args + testargs )
UpperCAmelCase__ : List[str] = get_results(snake_case__ )
self.assertGreaterEqual(result["eval_overall_accuracy"] , 0.10 )
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def __a ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ : int = self.get_auto_remove_tmp_dir()
UpperCAmelCase__ : Optional[Any] = f'\n {self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py\n --model_name_or_path google/vit-base-patch16-224-in21k\n --dataset_name hf-internal-testing/cats_vs_dogs_sample\n --learning_rate 1e-4\n --per_device_train_batch_size 2\n --per_device_eval_batch_size 1\n --max_train_steps 2\n --train_val_split 0.1\n --seed 42\n --output_dir {tmp_dir}\n --with_tracking\n --checkpointing_steps 1\n '.split()
if is_cuda_and_apex_available():
testargs.append("--fp16" )
run_command(self._launch_args + testargs )
UpperCAmelCase__ : Dict = get_results(snake_case__ )
# The base model scores a 25%
self.assertGreaterEqual(result["eval_accuracy"] , 0.6 )
self.assertTrue(os.path.exists(os.path.join(snake_case__ , "step_1" ) ) )
self.assertTrue(os.path.exists(os.path.join(snake_case__ , "image_classification_no_trainer" ) ) )
| 298
|
"""simple docstring"""
def SCREAMING_SNAKE_CASE__ ( k : int , n : int )-> list[int]:
    '''Return the k-th (0-indexed) permutation of 0 ... n-1 in lexicographic order.'''
    factorials = [1]
    for i in range(2 , n ):
        factorials.append(factorials[-1] * i )
    assert 0 <= k < factorials[-1] * n, "k out of bounds"
    permutation = []
    elements = list(range(n ) )
    # Find permutation
    while factorials:
        factorial = factorials.pop()
        number , k = divmod(k , factorial )
        permutation.append(elements[number] )
        elements.remove(elements[number] )
    permutation.append(elements[0] )
    return permutation
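# A short worked check (illustrative, not in the original file): among the
# permutations of range(4) in lexicographic order, zero-based index 5 is
# [0, 3, 2, 1], since 5 = 0*3! + 2*2! + 1*1! selects those elements in turn.
assert SCREAMING_SNAKE_CASE__(5 , 4 ) == [0, 3, 2, 1]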
if __name__ == "__main__":
import doctest
doctest.testmod()
| 298
| 1
|
"""simple docstring"""
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import UperNetForSemanticSegmentation
from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UperNetModelTester:
def __init__( self : int , snake_case__ : int , snake_case__ : List[str]=1_3 , snake_case__ : Any=3_2 , snake_case__ : Tuple=3 , snake_case__ : str=4 , snake_case__ : Optional[Any]=[1_0, 2_0, 3_0, 4_0] , snake_case__ : Any=[2, 2, 3, 2] , snake_case__ : Optional[int]=True , snake_case__ : List[Any]=True , snake_case__ : List[Any]=3_7 , snake_case__ : Dict="gelu" , snake_case__ : str=1_0 , snake_case__ : int=0.02 , snake_case__ : Optional[int]=["stage2", "stage3", "stage4"] , snake_case__ : Optional[Any]=3 , snake_case__ : str=None , ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = parent
UpperCAmelCase__ : Optional[int] = batch_size
UpperCAmelCase__ : Any = image_size
UpperCAmelCase__ : str = num_channels
UpperCAmelCase__ : Optional[Any] = num_stages
UpperCAmelCase__ : Union[str, Any] = hidden_sizes
UpperCAmelCase__ : List[Any] = depths
UpperCAmelCase__ : Optional[Any] = is_training
UpperCAmelCase__ : List[str] = use_labels
UpperCAmelCase__ : List[Any] = intermediate_size
UpperCAmelCase__ : str = hidden_act
UpperCAmelCase__ : str = type_sequence_label_size
UpperCAmelCase__ : Optional[int] = initializer_range
UpperCAmelCase__ : Tuple = out_features
UpperCAmelCase__ : int = num_labels
UpperCAmelCase__ : Dict = scope
UpperCAmelCase__ : Any = num_stages
def __a ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase__ : Optional[int] = None
if self.use_labels:
UpperCAmelCase__ : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase__ : str = self.get_config()
return config, pixel_values, labels
def __a ( self : Dict ):
'''simple docstring'''
return ConvNextConfig(
num_channels=self.num_channels , num_stages=self.num_stages , hidden_sizes=self.hidden_sizes , depths=self.depths , is_training=self.is_training , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , out_features=self.out_features , )
def __a ( self : str ):
'''simple docstring'''
return UperNetConfig(
backbone_config=self.get_backbone_config() , hidden_size=5_1_2 , pool_scales=[1, 2, 3, 6] , use_auxiliary_head=snake_case__ , auxiliary_loss_weight=0.4 , auxiliary_in_channels=4_0 , auxiliary_channels=2_5_6 , auxiliary_num_convs=1 , auxiliary_concat_input=snake_case__ , loss_ignore_index=2_5_5 , num_labels=self.num_labels , )
def __a ( self : Tuple , snake_case__ : Dict , snake_case__ : Union[str, Any] , snake_case__ : Dict ):
'''simple docstring'''
UpperCAmelCase__ : int = UperNetForSemanticSegmentation(config=snake_case__ )
model.to(snake_case__ )
model.eval()
UpperCAmelCase__ : Any = model(snake_case__ )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )
def __a ( self : str ):
'''simple docstring'''
config_and_inputs = self.prepare_config_and_inputs()
config, pixel_values, labels = config_and_inputs
inputs_dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class lowerCAmelCase__ ( __magic_name__ , __magic_name__ , unittest.TestCase ):
SCREAMING_SNAKE_CASE_ =(UperNetForSemanticSegmentation,) if is_torch_available() else ()
SCREAMING_SNAKE_CASE_ ={'''image-segmentation''': UperNetForSemanticSegmentation} if is_torch_available() else {}
SCREAMING_SNAKE_CASE_ =False
SCREAMING_SNAKE_CASE_ =False
SCREAMING_SNAKE_CASE_ =False
SCREAMING_SNAKE_CASE_ =False
SCREAMING_SNAKE_CASE_ =False
SCREAMING_SNAKE_CASE_ =False
def __a ( self : Optional[Any] ):
'''simple docstring'''
self.model_tester = UperNetModelTester(self )
self.config_tester = ConfigTester(self , config_class=UperNetConfig , has_text_modality=False , hidden_size=3_7 )
def __a ( self : int ):
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __a ( self : Union[str, Any] ):
'''simple docstring'''
return
def __a ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ : List[Any] = model_class(snake_case__ )
UpperCAmelCase__ : str = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase__ : List[str] = [*signature.parameters.keys()]
UpperCAmelCase__ : Optional[int] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , snake_case__ )
def __a ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*snake_case__ )
@unittest.skip(reason="UperNet does not use inputs_embeds" )
def __a ( self : Tuple ):
'''simple docstring'''
pass
@unittest.skip(reason="UperNet does not support input and output embeddings" )
def __a ( self : int ):
'''simple docstring'''
pass
@unittest.skip(reason="UperNet does not have a base model" )
def __a ( self : str ):
'''simple docstring'''
pass
@unittest.skip(reason="UperNet does not have a base model" )
def __a ( self : Tuple ):
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(reason="UperNet has some layers using `add_module` which doesn't work well with `nn.DataParallel`" )
def __a ( self : List[Any] ):
'''simple docstring'''
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def __a ( self : Optional[Any] ):
'''simple docstring'''
pass
def __a ( self : str ):
'''simple docstring'''
def check_hidden_states_output(snake_case__ : str , snake_case__ : Tuple , snake_case__ : str ):
UpperCAmelCase__ : Any = model_class(snake_case__ )
model.to(snake_case__ )
model.eval()
with torch.no_grad():
UpperCAmelCase__ : Dict = model(**self._prepare_for_class(snake_case__ , snake_case__ ) )
UpperCAmelCase__ : Tuple = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
UpperCAmelCase__ : str = self.model_tester.num_stages
self.assertEqual(len(snake_case__ ) , expected_num_stages + 1 )
# ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
UpperCAmelCase__ , UpperCAmelCase__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ : Union[str, Any] = True
check_hidden_states_output(snake_case__ , snake_case__ , snake_case__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase__ : Optional[Any] = True
check_hidden_states_output(snake_case__ , snake_case__ , snake_case__ )
def __a ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase__ : Any = _config_zero_init(snake_case__ )
UpperCAmelCase__ : int = _config_zero_init(configs_no_init.backbone_config )
for model_class in self.all_model_classes:
UpperCAmelCase__ : Any = model_class(config=snake_case__ )
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f'Parameter {name} of model {model_class} seems not properly initialized' , )
@unittest.skip(reason="UperNet does not have tied weights" )
def __a ( self : Any ):
'''simple docstring'''
pass
@slow
def __a ( self : Optional[Any] ):
'''simple docstring'''
for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = UperNetForSemanticSegmentation.from_pretrained(model_name )
self.assertIsNotNone(model )
def prepare_img():
    '''simple docstring'''
    filepath = hf_hub_download(
        repo_id="hf-internal-testing/fixtures_ade20k" , repo_type="dataset" , filename="ADE_val_00000001.jpg" )
    image = Image.open(filepath ).convert("RGB" )
    return image
@require_torch
@require_vision
@slow
class lowerCAmelCase__ ( unittest.TestCase ):
def __a ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase__ : str = AutoImageProcessor.from_pretrained("openmmlab/upernet-swin-tiny" )
UpperCAmelCase__ : Any = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-swin-tiny" ).to(snake_case__ )
UpperCAmelCase__ : int = prepare_img()
UpperCAmelCase__ : Optional[int] = processor(images=snake_case__ , return_tensors="pt" ).to(snake_case__ )
with torch.no_grad():
UpperCAmelCase__ : List[str] = model(**snake_case__ )
UpperCAmelCase__ : Optional[int] = torch.Size((1, model.config.num_labels, 5_1_2, 5_1_2) )
self.assertEqual(outputs.logits.shape , snake_case__ )
UpperCAmelCase__ : int = torch.tensor(
[[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]] ).to(snake_case__ )
self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , snake_case__ , atol=1e-4 ) )
def __a ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny" )
UpperCAmelCase__ : List[str] = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny" ).to(snake_case__ )
UpperCAmelCase__ : Optional[int] = prepare_img()
UpperCAmelCase__ : Dict = processor(images=snake_case__ , return_tensors="pt" ).to(snake_case__ )
with torch.no_grad():
UpperCAmelCase__ : int = model(**snake_case__ )
UpperCAmelCase__ : str = torch.Size((1, model.config.num_labels, 5_1_2, 5_1_2) )
self.assertEqual(outputs.logits.shape , snake_case__ )
UpperCAmelCase__ : List[str] = torch.tensor(
[[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]] ).to(snake_case__ )
self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , snake_case__ , atol=1e-4 ) )
| 298
|
"""simple docstring"""
import unittest
import numpy as np
import requests
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
_lowerCAmelCase : Union[str, Any] = False
if is_vision_available():
from PIL import Image
from transformers import Pix2StructImageProcessor
class Pix2StructImageProcessingTester( unittest.TestCase ):
def __init__( self : Dict , snake_case__ : Optional[int] , snake_case__ : List[str]=7 , snake_case__ : int=3 , snake_case__ : Any=1_8 , snake_case__ : List[Any]=3_0 , snake_case__ : int=4_0_0 , snake_case__ : Dict=None , snake_case__ : Optional[Any]=True , snake_case__ : List[str]=True , snake_case__ : Optional[Any]=None , ):
'''simple docstring'''
UpperCAmelCase__ : Dict = size if size is not None else {"height": 2_0, "width": 2_0}
UpperCAmelCase__ : List[str] = parent
UpperCAmelCase__ : List[str] = batch_size
UpperCAmelCase__ : Optional[Any] = num_channels
UpperCAmelCase__ : Any = image_size
UpperCAmelCase__ : int = min_resolution
UpperCAmelCase__ : Tuple = max_resolution
UpperCAmelCase__ : Optional[int] = size
UpperCAmelCase__ : Optional[int] = do_normalize
UpperCAmelCase__ : str = do_convert_rgb
UpperCAmelCase__ : Dict = [5_1_2, 1_0_2_4, 2_0_4_8, 4_0_9_6]
UpperCAmelCase__ : Union[str, Any] = patch_size if patch_size is not None else {"height": 1_6, "width": 1_6}
def __a ( self : str ):
'''simple docstring'''
return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}
def __a ( self : Any ):
'''simple docstring'''
UpperCAmelCase__ : Any = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg"
UpperCAmelCase__ : List[str] = Image.open(requests.get(snake_case__ , stream=snake_case__ ).raw ).convert("RGB" )
return raw_image
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason='''`Pix2StructImageProcessor` requires `torch>=1.11.0`.''' , )
@require_torch
@require_vision
class lowerCAmelCase__ ( __magic_name__ , unittest.TestCase ):
SCREAMING_SNAKE_CASE_ =Pix2StructImageProcessor if is_vision_available() else None
def __a ( self : List[str] ):
'''simple docstring'''
self.image_processor_tester = Pix2StructImageProcessingTester(self )
@property
def __a ( self : Optional[int] ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def __a ( self : str ):
'''simple docstring'''
UpperCAmelCase__ : int = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(snake_case__ , "do_normalize" ) )
self.assertTrue(hasattr(snake_case__ , "do_convert_rgb" ) )
def __a ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = self.image_processor_tester.prepare_dummy_image()
UpperCAmelCase__ : Any = self.image_processing_class(**self.image_processor_dict )
UpperCAmelCase__ : Dict = 2_0_4_8
UpperCAmelCase__ : int = image_processor(snake_case__ , return_tensors="pt" , max_patches=snake_case__ )
self.assertTrue(torch.allclose(inputs.flattened_patches.mean() , torch.tensor(0.0606 ) , atol=1e-3 , rtol=1e-3 ) )
def __a ( self : List[Any] ):
'''simple docstring'''
# Initialize image_processor
UpperCAmelCase__ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCAmelCase__ : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case__ )
for image in image_inputs:
self.assertIsInstance(snake_case__ , Image.Image )
# Test not batched input
UpperCAmelCase__ : int = (
(self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
UpperCAmelCase__ : List[Any] = image_processor(
image_inputs[0] , return_tensors="pt" , max_patches=snake_case__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
UpperCAmelCase__ : str = image_processor(
snake_case__ , return_tensors="pt" , max_patches=snake_case__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def __a ( self : List[Any] ):
'''simple docstring'''
# Initialize image_processor
UpperCAmelCase__ : str = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCAmelCase__ : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case__ )
for image in image_inputs:
self.assertIsInstance(snake_case__ , Image.Image )
# Test not batched input
UpperCAmelCase__ : int = (
(self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
* self.image_processor_tester.num_channels
) + 2
UpperCAmelCase__ : Optional[int] = True
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
with self.assertRaises(snake_case__ ):
UpperCAmelCase__ : List[Any] = image_processor(
image_inputs[0] , return_tensors="pt" , max_patches=snake_case__ ).flattened_patches
UpperCAmelCase__ : Optional[Any] = "Hello"
UpperCAmelCase__ : int = image_processor(
image_inputs[0] , return_tensors="pt" , max_patches=snake_case__ , header_text=snake_case__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
UpperCAmelCase__ : Dict = image_processor(
snake_case__ , return_tensors="pt" , max_patches=snake_case__ , header_text=snake_case__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def __a ( self : Dict ):
'''simple docstring'''
# Initialize image_processor
UpperCAmelCase__ : Any = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCAmelCase__ : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case__ , numpify=snake_case__ )
for image in image_inputs:
self.assertIsInstance(snake_case__ , np.ndarray )
UpperCAmelCase__ : int = (
(self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
UpperCAmelCase__ : Dict = image_processor(
image_inputs[0] , return_tensors="pt" , max_patches=snake_case__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
UpperCAmelCase__ : List[str] = image_processor(
snake_case__ , return_tensors="pt" , max_patches=snake_case__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def __a ( self : Optional[int] ):
'''simple docstring'''
# Initialize image_processor
UpperCAmelCase__ : int = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCAmelCase__ : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case__ , torchify=snake_case__ )
for image in image_inputs:
self.assertIsInstance(snake_case__ , torch.Tensor )
# Test not batched input
UpperCAmelCase__ : int = (
(self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
UpperCAmelCase__ : int = image_processor(
image_inputs[0] , return_tensors="pt" , max_patches=snake_case__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
UpperCAmelCase__ : str = image_processor(
snake_case__ , return_tensors="pt" , max_patches=snake_case__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason='''`Pix2StructImageProcessor` requires `torch>=1.11.0`.''' , )
@require_torch
@require_vision
class lowerCAmelCase__ ( __magic_name__ , unittest.TestCase ):
SCREAMING_SNAKE_CASE_ =Pix2StructImageProcessor if is_vision_available() else None
def __a ( self : List[str] ):
'''simple docstring'''
self.image_processor_tester = Pix2StructImageProcessingTester(self , num_channels=4 )
self.expected_encoded_image_num_channels = 3
@property
def __a ( self : int ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def __a ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(snake_case__ , "do_normalize" ) )
self.assertTrue(hasattr(snake_case__ , "do_convert_rgb" ) )
def __a ( self : int ):
'''simple docstring'''
# Initialize image_processor
UpperCAmelCase__ : str = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCAmelCase__ : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case__ )
for image in image_inputs:
self.assertIsInstance(snake_case__ , Image.Image )
# Test not batched input
UpperCAmelCase__ : str = (
(self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
* (self.image_processor_tester.num_channels - 1)
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
UpperCAmelCase__ : Optional[int] = image_processor(
image_inputs[0] , return_tensors="pt" , max_patches=snake_case__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
UpperCAmelCase__ : Dict = image_processor(
snake_case__ , return_tensors="pt" , max_patches=snake_case__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
"""simple docstring"""
from __future__ import annotations
import math
def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float) -> int:
    '''simple docstring'''
    if depth < 0:
        raise ValueError("Depth cannot be less than 0")
    if len(scores) == 0:
        raise ValueError("Scores cannot be empty")
    # a leaf has been reached: return its score
    if depth == height:
        return scores[node_index]
    if is_max:
        return max(
            minimax(depth + 1, node_index * 2, False, scores, height),
            minimax(depth + 1, node_index * 2 + 1, False, scores, height),
        )
    return min(
        minimax(depth + 1, node_index * 2, True, scores, height),
        minimax(depth + 1, node_index * 2 + 1, True, scores, height),
    )


def main() -> None:
    '''simple docstring'''
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores), 2)  # height of a perfect binary tree with these leaves
    print("Optimal value : ", end="")
    print(minimax(0, 0, True, scores, height))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
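
# Illustrative worked example (added; not in the original file): with four
# leaves and height log2(4) = 2, the maximizer picks the better of the two
# minimizer results: max(min(3, 5), min(2, 9)) = max(3, 2) = 3.
if __name__ == "__main__":
    assert minimax(0, 0, True, [3, 5, 2, 9], 2) == 3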
"""simple docstring"""
import importlib
import os
import fsspec
import pytest
from fsspec import register_implementation
from fsspec.registry import _registry as _fsspec_registry
from datasets.filesystems import COMPRESSION_FILESYSTEMS, HfFileSystem, extract_path_from_uri, is_remote_filesystem
from .utils import require_lz4, require_zstandard
def test_mockfs(mockfs):
    '''simple docstring'''
    assert "mock" in _fsspec_registry
    assert "bz2" in _fsspec_registry


def test_non_mockfs():
    '''simple docstring'''
    assert "mock" not in _fsspec_registry
    assert "bz2" in _fsspec_registry
def test_extract_path_from_uri():
    '''simple docstring'''
    mock_bucket = "mock-s3-bucket"
    dataset_path = f"s3://{mock_bucket}"
    dataset_path = extract_path_from_uri(dataset_path)
    assert dataset_path.startswith("s3://") is False

    dataset_path = "./local/path"
    new_dataset_path = extract_path_from_uri(dataset_path)
    assert dataset_path == new_dataset_path
def test_is_remote_filesystem(mockfs):
    '''simple docstring'''
    is_remote = is_remote_filesystem(mockfs)
    assert is_remote is True

    fs = fsspec.filesystem("file")
    is_remote = is_remote_filesystem(fs)
    assert is_remote is False
@pytest.mark.parametrize("compression_fs_class", COMPRESSION_FILESYSTEMS)
def test_compression_filesystems(compression_fs_class, gz_file, bz2_file, lz4_file, zstd_file, xz_file, text_file):
    '''simple docstring'''
    input_paths = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_file, "bz2": bz2_file, "lz4": lz4_file}
    input_path = input_paths[compression_fs_class.protocol]
    if input_path is None:
        reason = f"for '{compression_fs_class.protocol}' compression protocol, "
        if compression_fs_class.protocol == "lz4":
            reason += require_lz4.kwargs["reason"]
        elif compression_fs_class.protocol == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason)
    fs = fsspec.filesystem(compression_fs_class.protocol, fo=input_path)
    assert isinstance(fs, compression_fs_class)
    expected_filename = os.path.basename(input_path)
    expected_filename = expected_filename[: expected_filename.rindex(".")]
    assert fs.glob("*") == [expected_filename]
    with fs.open(expected_filename, "r", encoding="utf-8") as f, open(text_file, encoding="utf-8") as expected_file:
        assert f.read() == expected_file.read()
@pytest.mark.parametrize("protocol", ["zip", "gzip"])
def test_fs_isfile(protocol, zip_jsonl_path, jsonl_gz_path):
    '''simple docstring'''
    compressed_file_paths = {"zip": zip_jsonl_path, "gzip": jsonl_gz_path}
    compressed_file_path = compressed_file_paths[protocol]
    member_file_path = "dataset.jsonl"
    path = f"{protocol}://{member_file_path}::{compressed_file_path}"
    fs, *_ = fsspec.get_fs_token_paths(path)
    assert fs.isfile(member_file_path)
    assert not fs.isfile("non_existing_" + member_file_path)
@pytest.mark.integration
def test_hf_filesystem(hf_token, hf_api, hf_private_dataset_repo_txt_data, text_file):
    '''simple docstring'''
    repo_info = hf_api.dataset_info(hf_private_dataset_repo_txt_data, token=hf_token)
    hffs = HfFileSystem(repo_info=repo_info, token=hf_token)
    assert sorted(hffs.glob("*")) == [".gitattributes", "data"]
    assert hffs.isdir("data")
    assert hffs.isfile(".gitattributes") and hffs.isfile("data/text_data.txt")
    with open(text_file) as f:
        assert hffs.open("data/text_data.txt", "r").read() == f.read()
def test_fsspec_registered_protocol_warning():
    '''simple docstring'''
    protocol = "bz2"

    # Import module
    import datasets.filesystems

    # Overwrite protocol and reload
    register_implementation(protocol, None, clobber=True)
    with pytest.warns(UserWarning) as warning_info:
        importlib.reload(datasets.filesystems)

    assert len(warning_info) == 1
    assert (
        str(warning_info[0].message)
        == f"A filesystem protocol was already set for {protocol} and will be overwritten."
    )
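
# Illustrative usage sketch (added; not in the original test file): the
# chained-URL convention exercised above lets fsspec open a member file
# inside an archive directly. "archive.zip" below is a hypothetical local
# file, so this helper is only runnable where such an archive exists.
def _chained_url_example():
    import fsspec

    with fsspec.open("zip://dataset.jsonl::archive.zip", "rt") as f:
        return f.readline()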
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_xmod""": [
"""XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""XmodConfig""",
"""XmodOnnxConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xmod"] = [
"""XMOD_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XmodForCausalLM""",
"""XmodForMaskedLM""",
"""XmodForMultipleChoice""",
"""XmodForQuestionAnswering""",
"""XmodForSequenceClassification""",
"""XmodForTokenClassification""",
"""XmodModel""",
"""XmodPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xmod import (
XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
XmodForCausalLM,
XmodForMaskedLM,
XmodForMultipleChoice,
XmodForQuestionAnswering,
XmodForSequenceClassification,
XmodForTokenClassification,
XmodModel,
XmodPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    # See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}


class MegatronBertConfig(PretrainedConfig):
    model_type = "megatron-bert"

    def __init__(
        self,
        vocab_size=29056,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
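
# Illustrative usage sketch (added; not in the original file): like any
# PretrainedConfig subclass, fields not passed keep the Megatron-BERT
# defaults declared above.
if __name__ == "__main__":
    config = MegatronBertConfig(hidden_size=512, num_hidden_layers=8, num_attention_heads=8)
    print(config.vocab_size)  # 29056, the default above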
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_funnel import FunnelTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

_model_names = [
"""small""",
"""small-base""",
"""medium""",
"""medium-base""",
"""intermediate""",
"""intermediate-base""",
"""large""",
"""large-base""",
"""xlarge""",
"""xlarge-base""",
]
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""funnel-transformer/small""": """https://huggingface.co/funnel-transformer/small/resolve/main/vocab.txt""",
"""funnel-transformer/small-base""": """https://huggingface.co/funnel-transformer/small-base/resolve/main/vocab.txt""",
"""funnel-transformer/medium""": """https://huggingface.co/funnel-transformer/medium/resolve/main/vocab.txt""",
"""funnel-transformer/medium-base""": (
"""https://huggingface.co/funnel-transformer/medium-base/resolve/main/vocab.txt"""
),
"""funnel-transformer/intermediate""": (
"""https://huggingface.co/funnel-transformer/intermediate/resolve/main/vocab.txt"""
),
"""funnel-transformer/intermediate-base""": (
"""https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/vocab.txt"""
),
"""funnel-transformer/large""": """https://huggingface.co/funnel-transformer/large/resolve/main/vocab.txt""",
"""funnel-transformer/large-base""": """https://huggingface.co/funnel-transformer/large-base/resolve/main/vocab.txt""",
"""funnel-transformer/xlarge""": """https://huggingface.co/funnel-transformer/xlarge/resolve/main/vocab.txt""",
"""funnel-transformer/xlarge-base""": (
"""https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""funnel-transformer/small""": """https://huggingface.co/funnel-transformer/small/resolve/main/tokenizer.json""",
"""funnel-transformer/small-base""": (
"""https://huggingface.co/funnel-transformer/small-base/resolve/main/tokenizer.json"""
),
"""funnel-transformer/medium""": """https://huggingface.co/funnel-transformer/medium/resolve/main/tokenizer.json""",
"""funnel-transformer/medium-base""": (
"""https://huggingface.co/funnel-transformer/medium-base/resolve/main/tokenizer.json"""
),
"""funnel-transformer/intermediate""": (
"""https://huggingface.co/funnel-transformer/intermediate/resolve/main/tokenizer.json"""
),
"""funnel-transformer/intermediate-base""": (
"""https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/tokenizer.json"""
),
"""funnel-transformer/large""": """https://huggingface.co/funnel-transformer/large/resolve/main/tokenizer.json""",
"""funnel-transformer/large-base""": (
"""https://huggingface.co/funnel-transformer/large-base/resolve/main/tokenizer.json"""
),
"""funnel-transformer/xlarge""": """https://huggingface.co/funnel-transformer/xlarge/resolve/main/tokenizer.json""",
"""funnel-transformer/xlarge-base""": (
"""https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/tokenizer.json"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {f"funnel-transformer/{name}": 512 for name in _model_names}
PRETRAINED_INIT_CONFIGURATION = {f"funnel-transformer/{name}": {"do_lower_case": True} for name in _model_names}
class FunnelTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = FunnelTokenizer
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    cls_token_type_id: int = 2

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="<unk>", sep_token="<sep>", pad_token="<pad>", cls_token="<cls>", mask_token="<mask>", bos_token="<s>", eos_token="</s>", clean_text=True, tokenize_chinese_chars=True, strip_accents=None, wordpieces_prefix="##", **kwargs):
        '''simple docstring'''
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            bos_token=bos_token,
            eos_token=eos_token,
            clean_text=clean_text,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            wordpieces_prefix=wordpieces_prefix,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        '''simple docstring'''
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls) * [self.cls_token_type_id] + len(token_ids_0 + sep) * [0]
        return len(cls) * [self.cls_token_type_id] + len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        '''simple docstring'''
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
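
# Illustrative usage sketch (added; not in the original file): unlike BERT,
# Funnel marks the leading <cls> token with token type id 2
# (cls_token_type_id above). Loading the checkpoint needs network access.
if __name__ == "__main__":
    tokenizer = FunnelTokenizerFast.from_pretrained("funnel-transformer/small")
    print(tokenizer("hello world")["token_type_ids"])  # first id is 2 (<cls>), the rest 0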
"""simple docstring"""
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import datasets
import datasets.config
from .utils import require_beam
class DummyBeamDataset(datasets.BeamBasedBuilder):
    def _info(self):
        '''simple docstring'''
        return datasets.DatasetInfo(
            features=datasets.Features({"content": datasets.Value("string")}),
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager, pipeline):
        '''simple docstring'''
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_dummy_examples()})]

    def _build_pcollection(self, pipeline, examples):
        '''simple docstring'''
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)


class NestedBeamDataset(datasets.BeamBasedBuilder):
    def _info(self):
        '''simple docstring'''
        return datasets.DatasetInfo(
            features=datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string")})}),
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager, pipeline):
        '''simple docstring'''
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_nested_examples()})
        ]

    def _build_pcollection(self, pipeline, examples):
        '''simple docstring'''
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)
def get_test_dummy_examples():
    '''simple docstring'''
    return [(i, {"content": content}) for i, content in enumerate(["foo", "bar", "foobar"])]


def get_test_nested_examples():
    '''simple docstring'''
    return [(i, {"a": {"b": [content]}}) for i, content in enumerate(["foo", "bar", "foobar"])]
class BeamBuilderTest(__magic_name__):
    @require_beam
    def test_download_and_prepare(self):
        '''simple docstring'''
        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train.arrow")
                )
            )
            self.assertDictEqual(builder.info.features, datasets.Features({"content": datasets.Value("string")}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            self.assertDictEqual(dset["train"][0], get_test_dummy_examples()[0][1])
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1], get_test_dummy_examples()[expected_num_examples - 1][1]
            )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset

    @require_beam
    def test_download_and_prepare_sharded(self):
        '''simple docstring'''
        import apache_beam as beam

        original_write_parquet = beam.io.parquetio.WriteToParquet

        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            with patch("apache_beam.io.parquetio.WriteToParquet") as write_parquet_mock:
                write_parquet_mock.side_effect = partial(original_write_parquet, num_shards=2)
                builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train-00000-of-00002.arrow"
                    )
                )
            )
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train-00001-of-00002.arrow"
                    )
                )
            )
            self.assertDictEqual(builder.info.features, datasets.Features({"content": datasets.Value("string")}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            # Order is not preserved when sharding, so we just check that all the elements are there
            self.assertListEqual(sorted(dset["train"]["content"]), sorted(["foo", "bar", "foobar"]))
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset

    @require_beam
    def test_no_beam_options(self):
        '''simple docstring'''
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir)
            self.assertRaises(datasets.builder.MissingBeamOptions, builder.download_and_prepare)

    @require_beam
    def test_nested_features(self):
        '''simple docstring'''
        expected_num_examples = len(get_test_nested_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = NestedBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train.arrow")
                )
            )
            self.assertDictEqual(
                builder.info.features, datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string")})})
            )
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            self.assertDictEqual(dset["train"][0], get_test_nested_examples()[0][1])
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1], get_test_nested_examples()[expected_num_examples - 1][1]
            )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset
"""simple docstring"""
def z_function(input_str: str) -> list[int]:
    '''simple docstring'''
    z_result = [0 for i in range(len(input_str))]

    # initialize interval's left pointer and right pointer
    left_pointer, right_pointer = 0, 0

    for i in range(1, len(input_str)):
        # case when current index is inside the interval
        if i <= right_pointer:
            min_edge = min(right_pointer - i + 1, z_result[i - left_pointer])
            z_result[i] = min_edge

        while go_next(i, z_result, input_str):
            z_result[i] += 1

        # if new index's result gives us more right interval,
        # we've to update left_pointer and right_pointer
        if i + z_result[i] - 1 > right_pointer:
            left_pointer, right_pointer = i, i + z_result[i] - 1

    return z_result


def go_next(i: int, z_result: list[int], s: str) -> bool:
    '''simple docstring'''
    return i + z_result[i] < len(s) and s[z_result[i]] == s[i + z_result[i]]


def find_pattern(pattern: str, input_str: str) -> int:
    '''simple docstring'''
    answer = 0
    # concatenate 'pattern' and 'input_str' and call z_function
    # with concatenated string
    z_result = z_function(pattern + input_str)

    for val in z_result:
        # if the value is at least the length of the pattern string,
        # this index is the starting position of a substring
        # equal to the pattern string
        if val >= len(pattern):
            answer += 1

    return answer
if __name__ == "__main__":
import doctest
doctest.testmod()
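
# Illustrative worked examples (added; not in the original file): every
# prefix of "aaaa" also occurs at the later positions, giving the classic
# Z-array, and the pattern counter finds the three occurrences of "a" in "aaa".
if __name__ == "__main__":
    assert z_function("aaaa") == [0, 3, 2, 1]
    assert find_pattern("a", "aaa") == 3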
"""simple docstring"""
import json
import os
import unittest
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class XLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLMTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        '''simple docstring'''
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab_tokens = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"w</w>",
"r</w>",
"t</w>",
"lo",
"low",
"er</w>",
"low</w>",
"lowest</w>",
"newer</w>",
"wider</w>",
"<unk>",
]
        vocab = dict(zip(vocab_tokens, range(len(vocab_tokens))))
        merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))

    def get_input_output_texts(self, tokenizer):
        '''simple docstring'''
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        '''simple docstring'''
        tokenizer = XLMTokenizer(self.vocab_file, self.merges_file)

        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    @slow
    def test_sequence_builders(self):
        '''simple docstring'''
        tokenizer = XLMTokenizer.from_pretrained("xlm-mlm-en-2048")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [0] + text + [1]
        assert encoded_pair == [0] + text + [1] + text_2 + [1]
"""simple docstring"""
from manim import *
class Stage2(Scene):
    def construct(self):
        '''simple docstring'''
        mem = Rectangle(height=0.5, width=0.5)
        fill = Rectangle(height=0.46, width=0.46).set_stroke(width=0)

        cpu_left_col_base = [mem.copy() for i in range(6)]
        cpu_right_col_base = [mem.copy() for i in range(6)]
        cpu_left_col = VGroup(*cpu_left_col_base).arrange(UP, buff=0)
        cpu_right_col = VGroup(*cpu_right_col_base).arrange(UP, buff=0)
        cpu_rects = VGroup(cpu_left_col, cpu_right_col).arrange(RIGHT, buff=0)
        cpu_text = Text("CPU", font_size=24)
        cpu = Group(cpu_rects, cpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        cpu.move_to([-2.5, -0.5, 0])
        self.add(cpu)

        gpu_base = [mem.copy() for i in range(4)]
        gpu_rect = VGroup(*gpu_base).arrange(UP, buff=0)
        gpu_text = Text("GPU", font_size=24)
        gpu = Group(gpu_rect, gpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        gpu.move_to([-1, -1, 0])
        self.add(gpu)

        model_base = [mem.copy() for i in range(6)]
        model_rect = VGroup(*model_base).arrange(RIGHT, buff=0)
        model_text = Text("Model", font_size=24)
        model = Group(model_rect, model_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        model.move_to([3, -1.0, 0])
        self.add(model)

        cpu_targs = []
        for i, rect in enumerate(model_base):
            rect.set_stroke(YELLOW)
            # target = fill.copy().set_fill(YELLOW, opacity=0.7)
            # target.move_to(rect)
            # self.add(target)

            cpu_target = Rectangle(height=0.46 / 4, width=0.46 / 3).set_stroke(width=0.0).set_fill(YELLOW, opacity=0.7)

            if i == 0:
                cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT), buff=0.02, direction=UP)
                cpu_target.set_x(cpu_target.get_x() + 0.1)
            elif i == 3:
                cpu_target.next_to(cpu_targs[0], direction=UP, buff=0.0)
            else:
                cpu_target.next_to(cpu_targs[i - 1], direction=RIGHT, buff=0.0)
            self.add(cpu_target)
            cpu_targs.append(cpu_target)

        checkpoint_base = [mem.copy() for i in range(6)]
        checkpoint_rect = VGroup(*checkpoint_base).arrange(RIGHT, buff=0)
        checkpoint_text = Text("Loaded Checkpoint", font_size=24)
        checkpoint = Group(checkpoint_rect, checkpoint_text).arrange(DOWN, aligned_edge=DOWN, buff=0.4)
        checkpoint.move_to([3, 0.5, 0])

        key = Square(side_length=2.2)
        key.move_to([-5, 2, 0])

        key_text = MarkupText(
            f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model",
            font_size=18,
        )
        key_text.move_to([-5, 2.4, 0])
        self.add(key_text, key)

        blue_text = MarkupText(
            f"<span fgcolor='{BLUE}'>●</span> Checkpoint",
            font_size=18,
        )
        blue_text.next_to(key_text, DOWN * 2.4, aligned_edge=key_text.get_left())

        step_2 = MarkupText(
            f'Next, a <i><span fgcolor="{BLUE}">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor="{BLUE}">single shard</span>.',
            font_size=24,
        )
        step_2.move_to([2, 2, 0])
        self.play(Write(step_2), Write(blue_text))
        self.play(Write(checkpoint_text, run_time=1), Create(checkpoint_rect, run_time=1))

        first_animations = []
        second_animations = []
        for i, rect in enumerate(checkpoint_base):
            target = fill.copy().set_fill(BLUE, opacity=0.7)
            target.move_to(rect)
            first_animations.append(GrowFromCenter(target, run_time=1))

            cpu_target = target.copy()
            cpu_target.generate_target()
            if i < 5:
                cpu_target.target.move_to(cpu_left_col_base[i + 1])
            else:
                cpu_target.target.move_to(cpu_right_col_base[i - 5])
            second_animations.append(MoveToTarget(cpu_target, run_time=1.5))

        self.play(*first_animations)
        self.play(*second_animations)
        self.wait()
"""simple docstring"""
import sys
from typing import Tuple
import numpy as np
import torch
from PIL import Image
from torch import nn
from transformers.image_utils import PILImageResampling
from utils import img_tensorize
class ResizeShortestEdge:
    def __init__(self, short_edge_length, max_size=sys.maxsize):
        '''simple docstring'''
        self.interp_method = "bilinear"
        self.max_size = max_size
        self.short_edge_length = short_edge_length

    def __call__(self, imgs):
        '''simple docstring'''
        img_augs = []
        for img in imgs:
            h, w = img.shape[:2]
            # later: provide list and randomly choose index for resize
            size = np.random.randint(self.short_edge_length[0], self.short_edge_length[1] + 1)
            if size == 0:
                return img
            scale = size * 1.0 / min(h, w)
            if h < w:
                newh, neww = size, scale * w
            else:
                newh, neww = scale * h, size
            if max(newh, neww) > self.max_size:
                scale = self.max_size * 1.0 / max(newh, neww)
                newh = newh * scale
                neww = neww * scale
            neww = int(neww + 0.5)
            newh = int(newh + 0.5)

            if img.dtype == np.uint8:
                pil_image = Image.fromarray(img)
                pil_image = pil_image.resize((neww, newh), PILImageResampling.BILINEAR)
                img = np.asarray(pil_image)
            else:
                img = img.permute(2, 0, 1).unsqueeze(0)  # 3, 0, 1) # hw(c) -> nchw
                img = nn.functional.interpolate(
                    img, (newh, neww), mode=self.interp_method, align_corners=False
                ).squeeze(0)
            img_augs.append(img)

        return img_augs
class Preprocess:
    def __init__(self, cfg):
        '''simple docstring'''
        self.aug = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST], cfg.INPUT.MAX_SIZE_TEST)
        self.input_format = cfg.INPUT.FORMAT
        self.size_divisibility = cfg.SIZE_DIVISIBILITY
        self.pad_value = cfg.PAD_VALUE
        self.max_image_size = cfg.INPUT.MAX_SIZE_TEST
        self.device = cfg.MODEL.DEVICE
        self.pixel_std = torch.tensor(cfg.MODEL.PIXEL_STD).to(self.device).view(len(cfg.MODEL.PIXEL_STD), 1, 1)
        self.pixel_mean = torch.tensor(cfg.MODEL.PIXEL_MEAN).to(self.device).view(len(cfg.MODEL.PIXEL_STD), 1, 1)
        self.normalizer = lambda x: (x - self.pixel_mean) / self.pixel_std

    def pad(self, images):
        '''simple docstring'''
        max_size = tuple(max(s) for s in zip(*[img.shape for img in images]))
        image_sizes = [im.shape[-2:] for im in images]
        images = [
            nn.functional.pad(
                im,
                [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]],
                value=self.pad_value,
            )
            for size, im in zip(image_sizes, images)
        ]
        return torch.stack(images), torch.tensor(image_sizes)

    def __call__(self, images, single_image=False):
        '''simple docstring'''
        with torch.no_grad():
            if not isinstance(images, list):
                images = [images]
            if single_image:
                assert len(images) == 1
            for i in range(len(images)):
                if isinstance(images[i], torch.Tensor):
                    images.insert(i, images.pop(i).to(self.device).float())
                elif not isinstance(images[i], torch.Tensor):
                    images.insert(
                        i,
                        torch.as_tensor(img_tensorize(images.pop(i), input_format=self.input_format))
                        .to(self.device)
                        .float(),
                    )
            # resize smallest edge
            raw_sizes = torch.tensor([im.shape[:2] for im in images])
            images = self.aug(images)
            # transpose images and convert to torch tensors
            # images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images]
            # now normalize before pad to avoid useless arithmetic
            images = [self.normalizer(x) for x in images]
            # now pad them to do the following operations
            images, sizes = self.pad(images)
            # Normalize
            if self.size_divisibility > 0:
                raise NotImplementedError()
            # pad
            scales_yx = torch.true_divide(raw_sizes, sizes)
            if single_image:
                return images[0], sizes[0], scales_yx[0]
            else:
                return images, sizes, scales_yx
def _scale_box(boxes, scale_yx):
    '''simple docstring'''
    boxes[:, 0::2] *= scale_yx[:, 1]
    boxes[:, 1::2] *= scale_yx[:, 0]
    return boxes


def _clip_box(tensor, box_size: Tuple[int, int]):
    '''simple docstring'''
    assert torch.isfinite(tensor).all(), "Box tensor contains infinite or NaN!"
    h, w = box_size
    tensor[:, 0].clamp_(min=0, max=w)
    tensor[:, 1].clamp_(min=0, max=h)
    tensor[:, 2].clamp_(min=0, max=w)
    tensor[:, 3].clamp_(min=0, max=h)
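
# Illustrative worked example (added; not in the original file): boxes are
# (x0, y0, x1, y1) while scale_yx holds (scale_y, scale_x), so x coordinates
# pick up scale_yx[:, 1] and y coordinates scale_yx[:, 0].
if __name__ == "__main__":
    boxes = torch.tensor([[10.0, 20.0, 30.0, 40.0]])
    scaled = _scale_box(boxes, torch.tensor([[0.5, 2.0]]))  # halve y, double x
    print(scaled)  # tensor([[20., 10., 60., 20.]])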
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class VQEncoderOutput(BaseOutput):
    latents: torch.FloatTensor


class VQModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(self, in_channels: int = 3, out_channels: int = 3, down_block_types: Tuple[str] = ("DownEncoderBlock2D",), up_block_types: Tuple[str] = ("UpDecoderBlock2D",), block_out_channels: Tuple[int] = (64,), layers_per_block: int = 1, act_fn: str = "silu", latent_channels: int = 3, sample_size: int = 32, num_vq_embeddings: int = 256, norm_num_groups: int = 32, vq_embed_dim: Optional[int] = None, scaling_factor: float = 0.18215, norm_type: str = "group"):
        '''simple docstring'''
        super().__init__()

        # pass init params to Encoder
        self.encoder = Encoder(in_channels=in_channels, out_channels=latent_channels, down_block_types=down_block_types, block_out_channels=block_out_channels, layers_per_block=layers_per_block, act_fn=act_fn, norm_num_groups=norm_num_groups, double_z=False)

        vq_embed_dim = vq_embed_dim if vq_embed_dim is not None else latent_channels

        self.quant_conv = nn.Conv2d(latent_channels, vq_embed_dim, 1)
        self.quantize = VectorQuantizer(num_vq_embeddings, vq_embed_dim, beta=0.25, remap=None, sane_index_shape=False)
        self.post_quant_conv = nn.Conv2d(vq_embed_dim, latent_channels, 1)

        # pass init params to Decoder
        self.decoder = Decoder(in_channels=latent_channels, out_channels=out_channels, up_block_types=up_block_types, block_out_channels=block_out_channels, layers_per_block=layers_per_block, act_fn=act_fn, norm_num_groups=norm_num_groups, norm_type=norm_type)

    @apply_forward_hook
    def encode(self, x: torch.FloatTensor, return_dict: bool = True):
        '''simple docstring'''
        h = self.encoder(x)
        h = self.quant_conv(h)

        if not return_dict:
            return (h,)

        return VQEncoderOutput(latents=h)

    @apply_forward_hook
    def decode(self, h: torch.FloatTensor, force_not_quantize: bool = False, return_dict: bool = True):
        '''simple docstring'''
        # also go through quantization layer
        if not force_not_quantize:
            quant, emb_loss, info = self.quantize(h)
        else:
            quant = h
        quant2 = self.post_quant_conv(quant)
        dec = self.decoder(quant2, quant if self.config.norm_type == "spatial" else None)

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)

    def forward(self, sample: torch.FloatTensor, return_dict: bool = True):
        '''simple docstring'''
        h = self.encode(sample).latents
        dec = self.decode(h).sample

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)
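
# Illustrative usage sketch (added; not in the original file): this class
# mirrors diffusers' VQModel, so assuming that upstream API, a forward pass
# is an encode -> quantize -> decode round trip that preserves the sample's
# spatial shape.
if __name__ == "__main__":
    from diffusers import VQModel as UpstreamVQModel  # upstream twin of the class above

    model = UpstreamVQModel()  # defaults: 3 channels in/out, one down/up block
    sample = torch.randn(1, 3, 32, 32)
    print(model(sample).sample.shape)  # torch.Size([1, 3, 32, 32])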
"""simple docstring"""
import qiskit
def half_adder(bit0: int, bit1: int) -> qiskit.result.counts.Counts:
    '''simple docstring'''
    simulator = qiskit.Aer.get_backend("aer_simulator")

    qc_ha = qiskit.QuantumCircuit(4, 2)
    # encode inputs in qubits 0 and 1
    if bit0 == 1:
        qc_ha.x(0)
    if bit1 == 1:
        qc_ha.x(1)
    qc_ha.barrier()

    # use cnots to write XOR of the inputs on qubit2
    qc_ha.cx(0, 2)
    qc_ha.cx(1, 2)

    # use ccx / toffoli gate to write AND of the inputs on qubit3
    qc_ha.ccx(0, 1, 3)
    qc_ha.barrier()

    # extract outputs
    qc_ha.measure(2, 0)  # extract XOR value
    qc_ha.measure(3, 1)  # extract AND value

    # Execute the circuit on the qasm simulator
    job = qiskit.execute(qc_ha, simulator, shots=1000)

    # Return the histogram data of the results of the experiment
    return job.result().get_counts(qc_ha)


if __name__ == "__main__":
    counts = half_adder(1, 1)
    print(f"Half Adder Output Qubit Counts: {counts}")
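
# Illustrative sanity check (added; not in the original file): the classical
# register reads (AND, XOR) from most to least significant bit, so on the
# noiseless simulator (1, 1) should put all 1000 shots on "10" (carry 1,
# sum 0) and (0, 1) on "01".
if __name__ == "__main__":
    assert half_adder(1, 1) == {"10": 1000}
    assert half_adder(0, 1) == {"01": 1000}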
"""simple docstring"""
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def freeze_module_params(module) -> None:
    '''simple docstring'''
    for param in module.parameters():
        param.requires_grad = False


def get_device() -> str:
    '''simple docstring'''
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if torch.backends.mps.is_available() and torch.backends.mps.is_built():
        device = "mps"
    if device == "mps":
        print(
            "WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch"
            " errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues"
            " with generations."
        )
    return device


def show_image(image) -> None:
    '''simple docstring'''
    fig = plt.imshow(image)
    fig.axes.get_xaxis().set_visible(False)
    fig.axes.get_yaxis().set_visible(False)
    plt.show()


def get_timestamp() -> str:
    '''simple docstring'''
    current_time = datetime.now()
    timestamp = current_time.strftime("%H:%M:%S")
    return timestamp
"""simple docstring"""
from typing import List
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""snap-research/efficientformer-l1-300""": (
"""https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json"""
),
}
class EfficientFormerConfig(PretrainedConfig):
    model_type = "efficientformer"

    def __init__(self, depths: List[int] = [3, 2, 6, 4], hidden_sizes: List[int] = [48, 96, 224, 448], downsamples: List[bool] = [True, True, True, True], dim: int = 448, key_dim: int = 32, attention_ratio: int = 4, resolution: int = 7, num_hidden_layers: int = 5, num_attention_heads: int = 8, mlp_expansion_ratio: int = 4, hidden_dropout_prob: float = 0.0, patch_size: int = 16, num_channels: int = 3, pool_size: int = 3, downsample_patch_size: int = 3, downsample_stride: int = 2, downsample_pad: int = 1, drop_path_rate: float = 0.0, num_meta3d_blocks: int = 1, distillation: bool = True, use_layer_scale: bool = True, layer_scale_init_value: float = 1e-5, hidden_act: str = "gelu", initializer_range: float = 0.02, layer_norm_eps: float = 1e-12, image_size: int = 224, batch_norm_eps: float = 1e-05, **kwargs):
        '''simple docstring'''
        super().__init__(**kwargs)

        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.hidden_sizes = hidden_sizes
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.depths = depths
        self.mlp_expansion_ratio = mlp_expansion_ratio
        self.downsamples = downsamples
        self.dim = dim
        self.key_dim = key_dim
        self.attention_ratio = attention_ratio
        self.resolution = resolution
        self.pool_size = pool_size
        self.downsample_patch_size = downsample_patch_size
        self.downsample_stride = downsample_stride
        self.downsample_pad = downsample_pad
        self.drop_path_rate = drop_path_rate
        self.num_meta3d_blocks = num_meta3d_blocks
        self.distillation = distillation
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.image_size = image_size
        self.batch_norm_eps = batch_norm_eps
"""simple docstring"""
import collections
import os
import re
from pathlib import Path
PATH_TO_TRANSFORMERS = "src/transformers"

# Matches is_xxx_available()
_re_backend = re.compile(r"is\_([a-z_]*)_available()")
# Catches a one-line _import_struct = {xxx}
_re_one_line_import_struct = re.compile(r"^_import_structure\s+=\s+\{([^\}]+)\}")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_re_import_struct_key_value = re.compile(r'\s+"\S*":\s+\[([^\]]*)\]')
# Catches a line if not is_foo_available
_re_test_backend = re.compile(r"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")
# Catches a line _import_struct["bla"].append("foo")
_re_import_struct_add_one = re.compile(r'^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_re_import_struct_add_many = re.compile(r"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]")
# Catches a line with an object between quotes and a comma: "MyModel",
_re_quote_object = re.compile(r'^\s+"([^"]+)",')
# Catches a line with objects between brackets only: ["foo", "bar"],
_re_between_brackets = re.compile(r"^\s+\[([^\]]+)\]")
# Catches a line with from foo import bar, bla, boo
_re_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
# Catches a line with try:
_re_try = re.compile(r"^\s*try:")
# Catches a line with else:
_re_else = re.compile(r"^\s*else:")
def find_backend(line):
    '''simple docstring'''
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)
def parse_init(init_file):
    '''simple docstring'''
    with open(init_file, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith("_import_structure = {"):
        line_index += 1

    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None

    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith("if TYPE_CHECKING") and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(r"\[([^\]]+)\]", content)
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(", ")])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", ") if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(" " * 8 + '"'):
            objects.append(line[9:-3])
        line_index += 1

    import_dict_objects = {"none": objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("if TYPE_CHECKING"):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(" " * 8 + '"'):
                    objects.append(line[9:-3])
                elif line.startswith(" " * 12 + '"'):
                    objects.append(line[13:-3])
                line_index += 1

            import_dict_objects[backend] = objects
        else:
            line_index += 1

    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith("else")
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", "))
        elif line.startswith(" " * 8):
            objects.append(line[8:-2])
        line_index += 1

    type_hint_objects = {"none": objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 12):
                    objects.append(line[12:-2])
                line_index += 1

            type_hint_objects[backend] = objects
        else:
            line_index += 1

    return import_dict_objects, type_hint_objects
def analyze_results(import_dict_objects, type_hint_objects):
    '''simple docstring'''
    def find_duplicates(seq):
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]

    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}")
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}")

        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = "base imports" if key == "none" else f"{key} backend"
            errors.append(f"Differences for {name}:")
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f"  {a} in TYPE_HINT but not in _import_structure.")
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f"  {a} in _import_structure but not in TYPE_HINT.")
    return errors
def check_all_inits():
    '''simple docstring'''
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, "__init__.py")
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    errors[0] = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append("\n".join(errors))
    if len(failures) > 0:
        raise ValueError("\n\n".join(failures))
def get_transformers_submodules():
    '''simple docstring'''
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_"):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob("*.py"))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, ".")
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
            if len(submodule.split(".")) == 1:
                submodules.append(submodule)
    return submodules
IGNORE_SUBMODULES = [
    "convert_pytorch_checkpoint_to_tf2",
    "modeling_flax_pytorch_utils",
    "models.esm.openfold_utils",
]
def check_submodules():
    '''simple docstring'''
    from transformers.utils import direct_transformers_import

    transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

    import_structure_keys = set(transformers._import_structure.keys())
    # This contains all the base keys of the _import_structure object defined in the init, but if the user is missing
    # some optional dependencies, they may not have all of them. Thus we read the init to read all additions and
    # (potentially re-) add them.
    with open(os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"), "r") as f:
        init_content = f.read()
    import_structure_keys.update(set(re.findall(r"import_structure\[\"([^\"]*)\"\]", init_content)))

    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in import_structure_keys
    ]

    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(f"- {module}" for module in module_not_registered)
        raise ValueError(
            "The following submodules are not properly registered in the main init of Transformers:\n"
            f"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value."
        )
if __name__ == "__main__":
check_all_inits()
check_submodules()
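
# Illustrative worked example (added; not in the original script):
# find_backend extracts the backend name from an `if not is_xxx_available():`
# guard and returns None for any other line.
if __name__ == "__main__":
    assert find_backend("    if not is_torch_available():") == "torch"
    assert find_backend("import os") is None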
"""simple docstring"""
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def log_results(result: Dataset, args: Dict[str, str]):
    '''simple docstring'''
    log_outputs = args.log_outputs
    dataset_id = "_".join(args.dataset.split("/") + [args.config, args.split])

    # load metric
    wer = load_metric("wer")
    cer = load_metric("cer")

    # compute metrics
    wer_result = wer.compute(references=result["target"], predictions=result["prediction"])
    cer_result = cer.compute(references=result["target"], predictions=result["prediction"])

    # print & log results
    result_str = f"WER: {wer_result}\nCER: {cer_result}"
    print(result_str)

    with open(f"{dataset_id}_eval_results.txt", "w") as f:
        f.write(result_str)

    # log all results in text file. Possibly interesting for analysis
    if log_outputs is not None:
        pred_file = f"log_{dataset_id}_predictions.txt"
        target_file = f"log_{dataset_id}_targets.txt"

        with open(pred_file, "w") as p, open(target_file, "w") as t:
            # mapping function to write output
            def write_to_file(batch, i):
                p.write(f"{i}" + "\n")
                p.write(batch["prediction"] + "\n")
                t.write(f"{i}" + "\n")
                t.write(batch["target"] + "\n")

            result.map(write_to_file, with_indices=True)
def SCREAMING_SNAKE_CASE__ ( snake_case : str )-> str:
'''simple docstring'''
UpperCAmelCase__ : str = "[,?.!\-\;\:\"“%‘”�—’…–]" # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
UpperCAmelCase__ : str = re.sub(snake_case , "" , text.lower() )
# In addition, we can normalize the target text, e.g. removing new lines characters etc...
# note that order is important here!
UpperCAmelCase__ : Tuple = ["\n\n", "\n", " ", " "]
for t in token_sequences_to_ignore:
UpperCAmelCase__ : List[Any] = " ".join(text.split(snake_case ) )
return text
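# Illustration (added; not part of the original script): with the character set
# above, normalization lowercases the text and strips the ignored punctuation,
# so, for example, normalize_text("Hello, World!") returns "hello world".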
def SCREAMING_SNAKE_CASE__ ( snake_case : List[str] )-> str:
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = load_dataset(args.dataset , args.config , split=args.split , use_auth_token=snake_case )
# for testing: only process the first two examples as a test
# dataset = dataset.select(range(10))
# load processor
UpperCAmelCase__ : List[Any] = AutoFeatureExtractor.from_pretrained(args.model_id )
UpperCAmelCase__ : str = feature_extractor.sampling_rate
# resample audio
UpperCAmelCase__ : Dict = dataset.cast_column("audio" , Audio(sampling_rate=snake_case ) )
# load eval pipeline
if args.device is None:
UpperCAmelCase__ : List[str] = 0 if torch.cuda.is_available() else -1
UpperCAmelCase__ : Optional[int] = pipeline("automatic-speech-recognition" , model=args.model_id , device=args.device )
# map function to decode audio
def map_to_pred(snake_case : Any ):
UpperCAmelCase__ : List[str] = asr(
batch["audio"]["array"] , chunk_length_s=args.chunk_length_s , stride_length_s=args.stride_length_s )
UpperCAmelCase__ : List[Any] = prediction["text"]
UpperCAmelCase__ : Optional[int] = normalize_text(batch["sentence"] )
return batch
# run inference on all examples
UpperCAmelCase__ : Dict = dataset.map(snake_case , remove_columns=dataset.column_names )
# compute and log_results
# do not change function below
log_results(snake_case , snake_case )
if __name__ == "__main__":
_lowerCAmelCase : Any = argparse.ArgumentParser()
parser.add_argument(
"""--model_id""", type=str, required=True, help="""Model identifier. Should be loadable with 🤗 Transformers"""
)
parser.add_argument(
"""--dataset""",
type=str,
required=True,
help="""Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets""",
)
parser.add_argument(
"""--config""", type=str, required=True, help="""Config of the dataset. *E.g.* `'en'` for Common Voice"""
)
parser.add_argument("""--split""", type=str, required=True, help="""Split of the dataset. *E.g.* `'test'`""")
parser.add_argument(
"""--chunk_length_s""", type=float, default=None, help="""Chunk length in seconds. Defaults to 5 seconds."""
)
parser.add_argument(
"""--stride_length_s""", type=float, default=None, help="""Stride of the audio chunks. Defaults to 1 second."""
)
parser.add_argument(
"""--log_outputs""", action="""store_true""", help="""If defined, write outputs to log file for analysis."""
)
parser.add_argument(
"""--device""",
type=int,
default=None,
help="""The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.""",
)
_lowerCAmelCase : Tuple = parser.parse_args()
main(args)
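# Hedged usage sketch (added; the script name and identifiers below are
# placeholders, not taken from this file):
#   python eval.py --model_id <hub-model-id> --dataset <hub-dataset-id> \
#       --config en --split test --chunk_length_s 5.0 --stride_length_s 1.0 --log_outputs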
"""simple docstring"""
import random
from typing import Any
def SCREAMING_SNAKE_CASE__ ( snake_case : list )-> list[Any]:
'''simple docstring'''
for _ in range(len(snake_case ) ):
UpperCAmelCase__ : Dict = random.randint(0 , len(snake_case ) - 1 )
UpperCAmelCase__ : List[Any] = random.randint(0 , len(snake_case ) - 1 )
        data[a], data[b] = data[b], data[a]
return data
if __name__ == "__main__":
_lowerCAmelCase : str = [0, 1, 2, 3, 4, 5, 6, 7]
_lowerCAmelCase : List[str] = ["""python""", """says""", """hello""", """!"""]
print("""Fisher-Yates Shuffle:""")
print("""List""", integers, strings)
print("""FY Shuffle""", fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
"""simple docstring"""
import inspect
import unittest
from datasets import load_dataset
from packaging import version
from transformers import BeitConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_MAPPING,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
)
from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
import PIL
from PIL import Image
from transformers import BeitImageProcessor
class lowerCAmelCase__ :
def __init__( self : Any , snake_case__ : Union[str, Any] , snake_case__ : str=1_0_0 , snake_case__ : str=1_3 , snake_case__ : Optional[int]=3_0 , snake_case__ : List[Any]=2 , snake_case__ : Any=3 , snake_case__ : Union[str, Any]=True , snake_case__ : List[Any]=True , snake_case__ : Any=3_2 , snake_case__ : List[str]=4 , snake_case__ : Any=4 , snake_case__ : Dict=3_7 , snake_case__ : str="gelu" , snake_case__ : Union[str, Any]=0.1 , snake_case__ : int=0.1 , snake_case__ : List[Any]=1_0 , snake_case__ : Any=0.02 , snake_case__ : List[str]=3 , snake_case__ : Tuple=None , snake_case__ : Tuple=[0, 1, 2, 3] , ):
'''simple docstring'''
UpperCAmelCase__ : int = parent
UpperCAmelCase__ : List[str] = 1_0_0
UpperCAmelCase__ : List[Any] = batch_size
UpperCAmelCase__ : int = image_size
UpperCAmelCase__ : List[Any] = patch_size
UpperCAmelCase__ : List[Any] = num_channels
UpperCAmelCase__ : Any = is_training
UpperCAmelCase__ : str = use_labels
UpperCAmelCase__ : Any = hidden_size
UpperCAmelCase__ : Dict = num_hidden_layers
UpperCAmelCase__ : int = num_attention_heads
UpperCAmelCase__ : Tuple = intermediate_size
UpperCAmelCase__ : Any = hidden_act
UpperCAmelCase__ : Optional[int] = hidden_dropout_prob
UpperCAmelCase__ : str = attention_probs_dropout_prob
UpperCAmelCase__ : Optional[int] = type_sequence_label_size
UpperCAmelCase__ : Any = initializer_range
UpperCAmelCase__ : Any = scope
UpperCAmelCase__ : Optional[Any] = out_indices
UpperCAmelCase__ : int = num_labels
# in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
UpperCAmelCase__ : List[Any] = (image_size // patch_size) ** 2
UpperCAmelCase__ : Optional[int] = num_patches + 1
def __a ( self : Dict ):
'''simple docstring'''
UpperCAmelCase__ : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase__ : str = None
UpperCAmelCase__ : Optional[int] = None
if self.use_labels:
UpperCAmelCase__ : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase__ : Any = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
UpperCAmelCase__ : Tuple = self.get_config()
return config, pixel_values, labels, pixel_labels
def __a ( self : int ):
'''simple docstring'''
return BeitConfig(
vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=snake_case__ , initializer_range=self.initializer_range , out_indices=self.out_indices , )
def __a ( self : int , snake_case__ : str , snake_case__ : str , snake_case__ : Dict , snake_case__ : List[str] ):
'''simple docstring'''
UpperCAmelCase__ : Dict = BeitModel(config=snake_case__ )
model.to(snake_case__ )
model.eval()
UpperCAmelCase__ : Dict = model(snake_case__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __a ( self : Any , snake_case__ : List[str] , snake_case__ : Union[str, Any] , snake_case__ : Dict , snake_case__ : Any ):
'''simple docstring'''
UpperCAmelCase__ : int = BeitForMaskedImageModeling(config=snake_case__ )
model.to(snake_case__ )
model.eval()
UpperCAmelCase__ : List[Any] = model(snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) )
def __a ( self : Optional[Any] , snake_case__ : Tuple , snake_case__ : Tuple , snake_case__ : str , snake_case__ : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = self.type_sequence_label_size
UpperCAmelCase__ : Union[str, Any] = BeitForImageClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
UpperCAmelCase__ : Union[str, Any] = model(snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
UpperCAmelCase__ : Any = 1
UpperCAmelCase__ : List[Any] = BeitForImageClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
UpperCAmelCase__ : Optional[int] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCAmelCase__ : Optional[Any] = model(snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __a ( self : Union[str, Any] , snake_case__ : int , snake_case__ : str , snake_case__ : Any , snake_case__ : Dict ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = self.num_labels
UpperCAmelCase__ : int = BeitForSemanticSegmentation(snake_case__ )
model.to(snake_case__ )
model.eval()
UpperCAmelCase__ : int = model(snake_case__ )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
UpperCAmelCase__ : Dict = model(snake_case__ , labels=snake_case__ )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
def __a ( self : int ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = self.prepare_config_and_inputs()
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : List[str] = config_and_inputs
UpperCAmelCase__ : Any = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class lowerCAmelCase__ ( __magic_name__ , __magic_name__ , unittest.TestCase ):
SCREAMING_SNAKE_CASE_ =(
(BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation)
if is_torch_available()
else ()
)
SCREAMING_SNAKE_CASE_ =(
{
'''feature-extraction''': BeitModel,
'''image-classification''': BeitForImageClassification,
'''image-segmentation''': BeitForSemanticSegmentation,
}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE_ =False
SCREAMING_SNAKE_CASE_ =False
SCREAMING_SNAKE_CASE_ =False
def __a ( self : Dict ):
'''simple docstring'''
UpperCAmelCase__ : Dict = BeitModelTester(self )
UpperCAmelCase__ : List[str] = ConfigTester(self , config_class=snake_case__ , has_text_modality=snake_case__ , hidden_size=3_7 )
def __a ( self : List[str] ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="BEiT does not use inputs_embeds" )
def __a ( self : List[Any] ):
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(reason="BEiT has some layers using `add_module` which doesn't work well with `nn.DataParallel`" )
def __a ( self : List[str] ):
'''simple docstring'''
pass
def __a ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ : Dict = model_class(snake_case__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
UpperCAmelCase__ : Tuple = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(snake_case__ , nn.Linear ) )
def __a ( self : str ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ : int = model_class(snake_case__ )
UpperCAmelCase__ : int = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase__ : str = [*signature.parameters.keys()]
UpperCAmelCase__ : int = ["pixel_values"]
self.assertListEqual(arg_names[:1] , snake_case__ )
def __a ( self : str ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case__ )
def __a ( self : Dict ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*snake_case__ )
def __a ( self : str ):
'''simple docstring'''
UpperCAmelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*snake_case__ )
def __a ( self : Dict ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*snake_case__ )
def __a ( self : List[Any] ):
'''simple docstring'''
if not self.model_tester.is_training:
return
UpperCAmelCase__ , UpperCAmelCase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase__ : Optional[int] = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if model_class in [*get_values(snake_case__ ), BeitForMaskedImageModeling]:
continue
UpperCAmelCase__ : Optional[Any] = model_class(snake_case__ )
model.to(snake_case__ )
model.train()
UpperCAmelCase__ : Optional[int] = self._prepare_for_class(snake_case__ , snake_case__ , return_labels=snake_case__ )
UpperCAmelCase__ : Tuple = model(**snake_case__ ).loss
loss.backward()
def __a ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
UpperCAmelCase__ : Optional[int] = False
UpperCAmelCase__ : List[str] = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if (
model_class in [*get_values(snake_case__ ), BeitForMaskedImageModeling]
or not model_class.supports_gradient_checkpointing
):
continue
UpperCAmelCase__ : List[Any] = model_class(snake_case__ )
model.gradient_checkpointing_enable()
model.to(snake_case__ )
model.train()
UpperCAmelCase__ : Dict = self._prepare_for_class(snake_case__ , snake_case__ , return_labels=snake_case__ )
UpperCAmelCase__ : Optional[Any] = model(**snake_case__ ).loss
loss.backward()
def __a ( self : str ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase__ : Union[str, Any] = _config_zero_init(snake_case__ )
for model_class in self.all_model_classes:
UpperCAmelCase__ : int = model_class(config=snake_case__ )
for name, param in model.named_parameters():
# we skip lambda parameters as these require special initial values
# determined by config.layer_scale_init_value
if "lambda" in name:
continue
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f'Parameter {name} of model {model_class} seems not properly initialized' , )
@slow
def __a ( self : Any ):
'''simple docstring'''
for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase__ : Optional[Any] = BeitModel.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
def SCREAMING_SNAKE_CASE__ ( )-> Optional[Any]:
'''simple docstring'''
UpperCAmelCase__ : List[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class lowerCAmelCase__ ( unittest.TestCase ):
@cached_property
def __a ( self : Union[str, Any] ):
'''simple docstring'''
return BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224" ) if is_vision_available() else None
@slow
def __a ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = BeitForMaskedImageModeling.from_pretrained("microsoft/beit-base-patch16-224-pt22k" ).to(snake_case__ )
UpperCAmelCase__ : int = self.default_image_processor
UpperCAmelCase__ : List[Any] = prepare_img()
UpperCAmelCase__ : Dict = image_processor(images=snake_case__ , return_tensors="pt" ).pixel_values.to(snake_case__ )
# prepare bool_masked_pos
UpperCAmelCase__ : Union[str, Any] = torch.ones((1, 1_9_6) , dtype=torch.bool ).to(snake_case__ )
# forward pass
with torch.no_grad():
UpperCAmelCase__ : Optional[int] = model(pixel_values=snake_case__ , bool_masked_pos=snake_case__ )
UpperCAmelCase__ : str = outputs.logits
# verify the logits
UpperCAmelCase__ : int = torch.Size((1, 1_9_6, 8_1_9_2) )
self.assertEqual(logits.shape , snake_case__ )
UpperCAmelCase__ : Any = torch.tensor(
[[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]] ).to(snake_case__ )
self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3] , snake_case__ , atol=1e-2 ) )
@slow
def __a ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = BeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224" ).to(snake_case__ )
UpperCAmelCase__ : Tuple = self.default_image_processor
UpperCAmelCase__ : Dict = prepare_img()
UpperCAmelCase__ : Tuple = image_processor(images=snake_case__ , return_tensors="pt" ).to(snake_case__ )
# forward pass
with torch.no_grad():
UpperCAmelCase__ : Union[str, Any] = model(**snake_case__ )
UpperCAmelCase__ : Any = outputs.logits
# verify the logits
UpperCAmelCase__ : Optional[Any] = torch.Size((1, 1_0_0_0) )
self.assertEqual(logits.shape , snake_case__ )
UpperCAmelCase__ : Optional[Any] = torch.tensor([-1.2385, -1.0987, -1.0108] ).to(snake_case__ )
self.assertTrue(torch.allclose(logits[0, :3] , snake_case__ , atol=1e-4 ) )
UpperCAmelCase__ : List[str] = 2_8_1
self.assertEqual(logits.argmax(-1 ).item() , snake_case__ )
@slow
def __a ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : int = BeitForImageClassification.from_pretrained("microsoft/beit-large-patch16-224-pt22k-ft22k" ).to(
snake_case__ )
UpperCAmelCase__ : Tuple = self.default_image_processor
UpperCAmelCase__ : Any = prepare_img()
UpperCAmelCase__ : Union[str, Any] = image_processor(images=snake_case__ , return_tensors="pt" ).to(snake_case__ )
# forward pass
with torch.no_grad():
UpperCAmelCase__ : List[Any] = model(**snake_case__ )
UpperCAmelCase__ : int = outputs.logits
# verify the logits
UpperCAmelCase__ : int = torch.Size((1, 2_1_8_4_1) )
self.assertEqual(logits.shape , snake_case__ )
UpperCAmelCase__ : int = torch.tensor([1.6881, -0.2787, 0.5901] ).to(snake_case__ )
self.assertTrue(torch.allclose(logits[0, :3] , snake_case__ , atol=1e-4 ) )
UpperCAmelCase__ : Any = 2_3_9_6
self.assertEqual(logits.argmax(-1 ).item() , snake_case__ )
@slow
def __a ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ : Dict = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640" )
UpperCAmelCase__ : List[Any] = model.to(snake_case__ )
UpperCAmelCase__ : int = BeitImageProcessor(do_resize=snake_case__ , size=6_4_0 , do_center_crop=snake_case__ )
UpperCAmelCase__ : Any = load_dataset("hf-internal-testing/fixtures_ade20k" , split="test" )
UpperCAmelCase__ : List[Any] = Image.open(ds[0]["file"] )
UpperCAmelCase__ : str = image_processor(images=snake_case__ , return_tensors="pt" ).to(snake_case__ )
# forward pass
with torch.no_grad():
UpperCAmelCase__ : List[str] = model(**snake_case__ )
UpperCAmelCase__ : Dict = outputs.logits
# verify the logits
UpperCAmelCase__ : Any = torch.Size((1, 1_5_0, 1_6_0, 1_6_0) )
self.assertEqual(logits.shape , snake_case__ )
UpperCAmelCase__ : List[str] = version.parse(PIL.__version__ ) < version.parse("9.0.0" )
if is_pillow_less_than_a:
UpperCAmelCase__ : Optional[Any] = torch.tensor(
[
[[-4.9225, -2.3954, -3.0522], [-2.8822, -1.0046, -1.7561], [-2.9549, -1.3228, -2.1347]],
[[-5.8168, -3.4129, -4.0778], [-3.8651, -2.2214, -3.0277], [-3.8356, -2.4643, -3.3535]],
[[-0.0078, 3.9952, 4.0754], [2.9856, 4.6944, 5.0035], [3.2413, 4.7813, 4.9969]],
] , device=snake_case__ , )
else:
UpperCAmelCase__ : int = torch.tensor(
[
[[-4.8960, -2.3688, -3.0355], [-2.8478, -0.9836, -1.7418], [-2.9449, -1.3332, -2.1456]],
[[-5.8081, -3.4124, -4.1006], [-3.8561, -2.2081, -3.0323], [-3.8365, -2.4601, -3.3669]],
[[-0.0309, 3.9868, 4.0540], [2.9640, 4.6877, 4.9976], [3.2081, 4.7690, 4.9942]],
] , device=snake_case__ , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , snake_case__ , atol=1e-4 ) )
@slow
def __a ( self : Any ):
'''simple docstring'''
UpperCAmelCase__ : str = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640" )
UpperCAmelCase__ : Any = model.to(snake_case__ )
UpperCAmelCase__ : Dict = BeitImageProcessor(do_resize=snake_case__ , size=6_4_0 , do_center_crop=snake_case__ )
UpperCAmelCase__ : Tuple = load_dataset("hf-internal-testing/fixtures_ade20k" , split="test" )
UpperCAmelCase__ : Optional[int] = Image.open(ds[0]["file"] )
UpperCAmelCase__ : Optional[int] = image_processor(images=snake_case__ , return_tensors="pt" ).to(snake_case__ )
# forward pass
with torch.no_grad():
UpperCAmelCase__ : Optional[int] = model(**snake_case__ )
UpperCAmelCase__ : int = outputs.logits.detach().cpu()
UpperCAmelCase__ : str = image_processor.post_process_semantic_segmentation(outputs=snake_case__ , target_sizes=[(5_0_0, 3_0_0)] )
UpperCAmelCase__ : List[Any] = torch.Size((5_0_0, 3_0_0) )
self.assertEqual(segmentation[0].shape , snake_case__ )
UpperCAmelCase__ : Any = image_processor.post_process_semantic_segmentation(outputs=snake_case__ )
UpperCAmelCase__ : int = torch.Size((1_6_0, 1_6_0) )
self.assertEqual(segmentation[0].shape , snake_case__ )
"""simple docstring"""
import importlib
import os
import fsspec
import pytest
from fsspec import register_implementation
from fsspec.registry import _registry as _fsspec_registry
from datasets.filesystems import COMPRESSION_FILESYSTEMS, HfFileSystem, extract_path_from_uri, is_remote_filesystem
from .utils import require_lza, require_zstandard
def SCREAMING_SNAKE_CASE__ ( snake_case : Optional[Any] )-> Any:
'''simple docstring'''
assert "mock" in _fsspec_registry
assert "bz2" in _fsspec_registry
def SCREAMING_SNAKE_CASE__ ( )-> List[Any]:
'''simple docstring'''
assert "mock" not in _fsspec_registry
assert "bz2" in _fsspec_registry
def SCREAMING_SNAKE_CASE__ ( )-> Optional[int]:
'''simple docstring'''
UpperCAmelCase__ : int = "mock-s3-bucket"
UpperCAmelCase__ : Any = f's3://{mock_bucket}'
UpperCAmelCase__ : Tuple = extract_path_from_uri(snake_case )
assert dataset_path.startswith("s3://" ) is False
UpperCAmelCase__ : str = "./local/path"
UpperCAmelCase__ : Union[str, Any] = extract_path_from_uri(snake_case )
assert dataset_path == new_dataset_path
def SCREAMING_SNAKE_CASE__ ( snake_case : Any )-> str:
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = is_remote_filesystem(snake_case )
assert is_remote is True
UpperCAmelCase__ : str = fsspec.filesystem("file" )
UpperCAmelCase__ : Optional[Any] = is_remote_filesystem(snake_case )
assert is_remote is False
@pytest.mark.parametrize("compression_fs_class" , snake_case )
def SCREAMING_SNAKE_CASE__ ( snake_case : Union[str, Any] , snake_case : Any , snake_case : List[str] , snake_case : Optional[int] , snake_case : Union[str, Any] , snake_case : Optional[Any] , snake_case : int )-> int:
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_file, "bz2": bza_file, "lz4": lza_file}
UpperCAmelCase__ : Dict = input_paths[compression_fs_class.protocol]
if input_path is None:
UpperCAmelCase__ : Optional[Any] = f'for \'{compression_fs_class.protocol}\' compression protocol, '
if compression_fs_class.protocol == "lz4":
reason += require_lza.kwargs["reason"]
elif compression_fs_class.protocol == "zstd":
reason += require_zstandard.kwargs["reason"]
pytest.skip(snake_case )
UpperCAmelCase__ : Optional[Any] = fsspec.filesystem(compression_fs_class.protocol , fo=snake_case )
assert isinstance(snake_case , snake_case )
UpperCAmelCase__ : Union[str, Any] = os.path.basename(snake_case )
UpperCAmelCase__ : Optional[int] = expected_filename[: expected_filename.rindex("." )]
assert fs.glob("*" ) == [expected_filename]
with fs.open(snake_case , "r" , encoding="utf-8" ) as f, open(snake_case , encoding="utf-8" ) as expected_file:
assert f.read() == expected_file.read()
@pytest.mark.parametrize("protocol" , ["zip", "gzip"] )
def SCREAMING_SNAKE_CASE__ ( snake_case : Tuple , snake_case : Dict , snake_case : Tuple )-> Optional[Any]:
'''simple docstring'''
UpperCAmelCase__ : List[str] = {"zip": zip_jsonl_path, "gzip": jsonl_gz_path}
UpperCAmelCase__ : int = compressed_file_paths[protocol]
UpperCAmelCase__ : Any = "dataset.jsonl"
UpperCAmelCase__ : Any = f'{protocol}://{member_file_path}::{compressed_file_path}'
UpperCAmelCase__ , *UpperCAmelCase__ : Optional[int] = fsspec.get_fs_token_paths(snake_case )
assert fs.isfile(snake_case )
assert not fs.isfile("non_existing_" + member_file_path )
@pytest.mark.integration
def SCREAMING_SNAKE_CASE__ ( snake_case : List[str] , snake_case : Dict , snake_case : Dict , snake_case : Dict )-> str:
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = hf_api.dataset_info(snake_case , token=snake_case )
UpperCAmelCase__ : str = HfFileSystem(repo_info=snake_case , token=snake_case )
assert sorted(hffs.glob("*" ) ) == [".gitattributes", "data"]
assert hffs.isdir("data" )
assert hffs.isfile(".gitattributes" ) and hffs.isfile("data/text_data.txt" )
with open(snake_case ) as f:
assert hffs.open("data/text_data.txt" , "r" ).read() == f.read()
def SCREAMING_SNAKE_CASE__ ( )-> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase__ : Tuple = "bz2"
# Import module
import datasets.filesystems
# Overwrite protocol and reload
register_implementation(snake_case , snake_case , clobber=snake_case )
with pytest.warns(snake_case ) as warning_info:
importlib.reload(datasets.filesystems )
assert len(snake_case ) == 1
assert (
str(warning_info[0].message )
== f'A filesystem protocol was already set for {protocol} and will be overwritten.'
)
"""simple docstring"""
import functools
def SCREAMING_SNAKE_CASE__ ( snake_case : str , snake_case : str )-> int:
'''simple docstring'''
UpperCAmelCase__ : List[str] = len(snake_case )
UpperCAmelCase__ : str = len(snake_case )
@functools.cache
    def min_distance(indexa : int , indexb : int ) -> int:
        # if the first word's index overflows - delete all remaining letters of the second word
        if indexa >= len_worda:
            return len_wordb - indexb
        # if the second word's index overflows - delete all remaining letters of the first word
        if indexb >= len_wordb:
            return len_worda - indexa
        UpperCAmelCase__ : Optional[int] = int(worda[indexa] != wordb[indexb] )  # current letters not identical
        return min(
            1 + min_distance(indexa + 1 , indexb ) , 1 + min_distance(indexa , indexb + 1 ) , diff + min_distance(indexa + 1 , indexb + 1 ) , )
return min_distance(0 , 0 )
if __name__ == "__main__":
import doctest
doctest.testmod()
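# Worked example (added, hedged): with unit costs for insert, delete and
# substitute, the classic pair "intention" -> "execution" has edit distance 5,
# so the memoized function above should return 5 for those two strings.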
"""simple docstring"""
import unittest
from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script
@require_cpu
class lowerCAmelCase__ ( unittest.TestCase ):
def __a ( self : str ):
'''simple docstring'''
debug_launcher(test_script.main )
def __a ( self : Dict ):
'''simple docstring'''
debug_launcher(test_ops.main )
"""simple docstring"""
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class lowerCAmelCase__ ( __magic_name__ ):
def __a ( self : List[Any] , snake_case__ : str ):
'''simple docstring'''
with open(snake_case__ , encoding="utf-8" ) as input_file:
UpperCAmelCase__ : List[Any] = re.compile(R"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)" )
UpperCAmelCase__ : Tuple = input_file.read()
UpperCAmelCase__ : Tuple = regexp.search(snake_case__ )
return match
def __a ( self : List[str] , snake_case__ : str ):
'''simple docstring'''
with open(snake_case__ , encoding="utf-8" ) as input_file:
UpperCAmelCase__ : Union[str, Any] = re.compile(R"#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()" , re.DOTALL )
UpperCAmelCase__ : Dict = input_file.read()
# use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
UpperCAmelCase__ : int = regexp.finditer(snake_case__ )
UpperCAmelCase__ : Dict = [match for match in matches if match is not None and match.group(1 ) is not None]
return matches[0] if matches else None
def __a ( self : Any ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = Path("./datasets" )
UpperCAmelCase__ : Any = list(dataset_paths.absolute().glob("**/*.py" ) )
for dataset in dataset_files:
if self._no_encoding_on_file_open(str(snake_case__ ) ):
raise AssertionError(f'open(...) must use utf-8 encoding in {dataset}' )
def __a ( self : Dict ):
'''simple docstring'''
UpperCAmelCase__ : Any = Path("./datasets" )
UpperCAmelCase__ : int = list(dataset_paths.absolute().glob("**/*.py" ) )
for dataset in dataset_files:
if self._no_print_statements(str(snake_case__ ) ):
raise AssertionError(f'print statement found in {dataset}. Use datasets.logger/logging instead.' )
"""simple docstring"""
def SCREAMING_SNAKE_CASE__ ( snake_case : bytes )-> str:
'''simple docstring'''
return "".join([hex(snake_case )[2:].zfill(2 ).upper() for byte in list(snake_case )] )
def SCREAMING_SNAKE_CASE__ ( snake_case : str )-> bytes:
'''simple docstring'''
if (len(snake_case ) % 2) != 0:
raise ValueError(
"Base16 encoded data is invalid:\nData does not have an even number of hex digits." )
# Check the character set - the standard base16 alphabet
# is uppercase according to RFC3548 section 6
if not set(snake_case ) <= set("0123456789ABCDEF" ):
raise ValueError(
"Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters." )
# For every two hexadecimal digits (= a byte), turn it into an integer.
# Then, string the result together into bytes, and return it.
return bytes(int(data[i] + data[i + 1] , 16 ) for i in range(0 , len(snake_case ) , 2 ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
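# Round-trip illustration (added, hedged; `base16_encode` and `base16_decode`
# are assumed names for the two functions above):
#   base16_encode(b"Hello")     -> "48656C6C6F"
#   base16_decode("48656C6C6F") -> b"Hello"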
"""simple docstring"""
import numpy as np
import datasets
_lowerCAmelCase : Optional[int] = """
Compute the Mahalanobis Distance
The Mahalanobis distance is the distance between a point and a distribution,
not between two distinct points; it is effectively a multivariate equivalent of the Euclidean distance.
It was introduced by Prof. P. C. Mahalanobis in 1936
and has been used in various statistical applications ever since
[source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/]
"""
_lowerCAmelCase : Tuple = """\
@article{de2000mahalanobis,
title={The mahalanobis distance},
author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L},
journal={Chemometrics and intelligent laboratory systems},
volume={50},
number={1},
pages={1--18},
year={2000},
publisher={Elsevier}
}
"""
_lowerCAmelCase : Optional[int] = """
Args:
X: List of datapoints to be compared with the `reference_distribution`.
reference_distribution: List of datapoints from the reference distribution we want to compare to.
Returns:
    mahalanobis: The Mahalanobis distance for each datapoint in `X`.
Examples:
>>> mahalanobis_metric = datasets.load_metric(\"mahalanobis\")
>>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]])
>>> print(results)
{'mahalanobis': array([0.5])}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCAmelCase__ ( datasets.Metric ):
def __a ( self : Any ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"X": datasets.Sequence(datasets.Value("float" , id="sequence" ) , id="X" ),
} ) , )
def __a ( self : Union[str, Any] , snake_case__ : List[str] , snake_case__ : Any ):
'''simple docstring'''
# convert to numpy arrays
UpperCAmelCase__ : Union[str, Any] = np.array(snake_case__ )
UpperCAmelCase__ : Union[str, Any] = np.array(snake_case__ )
# Assert that arrays are 2D
if len(X.shape ) != 2:
raise ValueError("Expected `X` to be a 2D vector" )
if len(reference_distribution.shape ) != 2:
raise ValueError("Expected `reference_distribution` to be a 2D vector" )
if reference_distribution.shape[0] < 2:
raise ValueError(
"Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension" )
# Get mahalanobis distance for each prediction
UpperCAmelCase__ : Optional[Any] = X - np.mean(snake_case__ )
UpperCAmelCase__ : Tuple = np.cov(reference_distribution.T )
try:
UpperCAmelCase__ : str = np.linalg.inv(snake_case__ )
except np.linalg.LinAlgError:
UpperCAmelCase__ : Optional[Any] = np.linalg.pinv(snake_case__ )
UpperCAmelCase__ : List[Any] = np.dot(snake_case__ , snake_case__ )
UpperCAmelCase__ : Tuple = np.dot(snake_case__ , X_minus_mu.T ).diagonal()
return {"mahalanobis": mahal_dist}
"""simple docstring"""
import math
def SCREAMING_SNAKE_CASE__ ( snake_case : float , snake_case : float )-> float:
'''simple docstring'''
if initial_intensity < 0:
raise ValueError("The value of intensity cannot be negative" )
# handling of negative values of initial intensity
if angle < 0 or angle > 360:
raise ValueError("In Malus Law, the angle is in the range 0-360 degrees" )
# handling of values out of allowed range
return initial_intensity * (math.cos(math.radians(snake_case ) ) ** 2)
if __name__ == "__main__":
import doctest
doctest.testmod(name="""malus_law""")
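# Worked example (added): Malus's law gives I = I0 * cos^2(theta); for an initial
# intensity of 100 and an angle of 60 degrees the transmitted intensity is
# 100 * cos(60 deg)^2 = 100 * 0.25 = 25.0.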
"""simple docstring"""
import gc
import random
import unittest
import torch
from diffusers import (
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class lowerCAmelCase__ ( __magic_name__ , __magic_name__ , unittest.TestCase ):
SCREAMING_SNAKE_CASE_ =IFPipeline
SCREAMING_SNAKE_CASE_ =TEXT_TO_IMAGE_PARAMS - {'''width''', '''height''', '''latents'''}
SCREAMING_SNAKE_CASE_ =TEXT_TO_IMAGE_BATCH_PARAMS
SCREAMING_SNAKE_CASE_ =PipelineTesterMixin.required_optional_params - {'''latents'''}
def __a ( self : Dict ):
'''simple docstring'''
return self._get_dummy_components()
def __a ( self : Any , snake_case__ : Dict , snake_case__ : Optional[Any]=0 ):
'''simple docstring'''
if str(snake_case__ ).startswith("mps" ):
UpperCAmelCase__ : str = torch.manual_seed(snake_case__ )
else:
UpperCAmelCase__ : Optional[int] = torch.Generator(device=snake_case__ ).manual_seed(snake_case__ )
UpperCAmelCase__ : Tuple = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
def __a ( self : Tuple ):
'''simple docstring'''
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != "cuda" , reason="float16 requires CUDA" )
def __a ( self : Tuple ):
'''simple docstring'''
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1e-1 )
def __a ( self : Dict ):
'''simple docstring'''
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )
def __a ( self : int ):
'''simple docstring'''
self._test_save_load_local()
def __a ( self : Any ):
'''simple docstring'''
self._test_inference_batch_single_identical(
expected_max_diff=1e-2 , )
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def __a ( self : Optional[Any] ):
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
@slow
@require_torch_gpu
class lowerCAmelCase__ ( unittest.TestCase ):
def __a ( self : str ):
'''simple docstring'''
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __a ( self : Tuple ):
'''simple docstring'''
# if
UpperCAmelCase__ : Any = IFPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0" , variant="fp16" , torch_dtype=torch.floataa )
UpperCAmelCase__ : Union[str, Any] = IFSuperResolutionPipeline.from_pretrained(
"DeepFloyd/IF-II-L-v1.0" , variant="fp16" , torch_dtype=torch.floataa , text_encoder=snake_case__ , tokenizer=snake_case__ )
# pre compute text embeddings and remove T5 to save memory
pipe_a.text_encoder.to("cuda" )
UpperCAmelCase__ , UpperCAmelCase__ : Any = pipe_a.encode_prompt("anime turtle" , device="cuda" )
del pipe_a.tokenizer
del pipe_a.text_encoder
gc.collect()
UpperCAmelCase__ : Tuple = None
UpperCAmelCase__ : List[Any] = None
        # pipe_a is the stage-I pipeline, pipe_b the stage-II (super-resolution) pipeline
        pipe_a.enable_model_cpu_offload()
        pipe_b.enable_model_cpu_offload()
        pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
        pipe_b.unet.set_attn_processor(AttnAddedKVProcessor() )
        self._test_if(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
        pipe_a.remove_all_hooks()
        pipe_b.remove_all_hooks()
# img2img
        UpperCAmelCase__ : List[str] = IFImgaImgPipeline(**pipe_a.components )
        UpperCAmelCase__ : List[str] = IFImgaImgSuperResolutionPipeline(**pipe_b.components )
        pipe_a.enable_model_cpu_offload()
        pipe_b.enable_model_cpu_offload()
        pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
        pipe_b.unet.set_attn_processor(AttnAddedKVProcessor() )
        self._test_if_imgaimg(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
        pipe_a.remove_all_hooks()
        pipe_b.remove_all_hooks()
# inpainting
        UpperCAmelCase__ : List[str] = IFInpaintingPipeline(**pipe_a.components )
        UpperCAmelCase__ : List[str] = IFInpaintingSuperResolutionPipeline(**pipe_b.components )
        pipe_a.enable_model_cpu_offload()
        pipe_b.enable_model_cpu_offload()
        pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
        pipe_b.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_inpainting(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
def __a ( self : List[str] , snake_case__ : Tuple , snake_case__ : List[Any] , snake_case__ : List[str] , snake_case__ : List[Any] ):
'''simple docstring'''
# pipeline 1
_start_torch_memory_measurement()
UpperCAmelCase__ : List[str] = torch.Generator(device="cpu" ).manual_seed(0 )
UpperCAmelCase__ : Dict = pipe_a(
prompt_embeds=snake_case__ , negative_prompt_embeds=snake_case__ , num_inference_steps=2 , generator=snake_case__ , output_type="np" , )
UpperCAmelCase__ : List[Any] = output.images[0]
assert image.shape == (6_4, 6_4, 3)
UpperCAmelCase__ : Optional[Any] = torch.cuda.max_memory_allocated()
assert mem_bytes < 1_3 * 1_0**9
UpperCAmelCase__ : str = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy" )
assert_mean_pixel_difference(snake_case__ , snake_case__ )
# pipeline 2
_start_torch_memory_measurement()
UpperCAmelCase__ : Optional[int] = torch.Generator(device="cpu" ).manual_seed(0 )
UpperCAmelCase__ : Dict = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(0 ) ).to(snake_case__ )
        UpperCAmelCase__ : str = pipe_b(
prompt_embeds=snake_case__ , negative_prompt_embeds=snake_case__ , image=snake_case__ , generator=snake_case__ , num_inference_steps=2 , output_type="np" , )
UpperCAmelCase__ : Union[str, Any] = output.images[0]
assert image.shape == (2_5_6, 2_5_6, 3)
UpperCAmelCase__ : List[str] = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 1_0**9
UpperCAmelCase__ : Dict = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy" )
assert_mean_pixel_difference(snake_case__ , snake_case__ )
def __a ( self : Optional[Any] , snake_case__ : Optional[int] , snake_case__ : Any , snake_case__ : Optional[Any] , snake_case__ : List[str] ):
'''simple docstring'''
# pipeline 1
_start_torch_memory_measurement()
UpperCAmelCase__ : List[str] = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(0 ) ).to(snake_case__ )
UpperCAmelCase__ : int = torch.Generator(device="cpu" ).manual_seed(0 )
UpperCAmelCase__ : Tuple = pipe_a(
prompt_embeds=snake_case__ , negative_prompt_embeds=snake_case__ , image=snake_case__ , num_inference_steps=2 , generator=snake_case__ , output_type="np" , )
UpperCAmelCase__ : str = output.images[0]
assert image.shape == (6_4, 6_4, 3)
UpperCAmelCase__ : Optional[Any] = torch.cuda.max_memory_allocated()
assert mem_bytes < 1_0 * 1_0**9
UpperCAmelCase__ : List[str] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy" )
assert_mean_pixel_difference(snake_case__ , snake_case__ )
# pipeline 2
_start_torch_memory_measurement()
UpperCAmelCase__ : int = torch.Generator(device="cpu" ).manual_seed(0 )
UpperCAmelCase__ : Optional[int] = floats_tensor((1, 3, 2_5_6, 2_5_6) , rng=random.Random(0 ) ).to(snake_case__ )
UpperCAmelCase__ : Tuple = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(0 ) ).to(snake_case__ )
        UpperCAmelCase__ : Dict = pipe_b(
prompt_embeds=snake_case__ , negative_prompt_embeds=snake_case__ , image=snake_case__ , original_image=snake_case__ , generator=snake_case__ , num_inference_steps=2 , output_type="np" , )
UpperCAmelCase__ : Optional[Any] = output.images[0]
assert image.shape == (2_5_6, 2_5_6, 3)
UpperCAmelCase__ : Dict = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 1_0**9
UpperCAmelCase__ : str = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy" )
assert_mean_pixel_difference(snake_case__ , snake_case__ )
def __a ( self : Union[str, Any] , snake_case__ : List[str] , snake_case__ : int , snake_case__ : int , snake_case__ : Optional[int] ):
'''simple docstring'''
# pipeline 1
_start_torch_memory_measurement()
UpperCAmelCase__ : str = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(0 ) ).to(snake_case__ )
UpperCAmelCase__ : Dict = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(1 ) ).to(snake_case__ )
UpperCAmelCase__ : Optional[int] = torch.Generator(device="cpu" ).manual_seed(0 )
UpperCAmelCase__ : int = pipe_a(
prompt_embeds=snake_case__ , negative_prompt_embeds=snake_case__ , image=snake_case__ , mask_image=snake_case__ , num_inference_steps=2 , generator=snake_case__ , output_type="np" , )
UpperCAmelCase__ : int = output.images[0]
assert image.shape == (6_4, 6_4, 3)
UpperCAmelCase__ : Union[str, Any] = torch.cuda.max_memory_allocated()
assert mem_bytes < 1_0 * 1_0**9
UpperCAmelCase__ : int = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy" )
assert_mean_pixel_difference(snake_case__ , snake_case__ )
# pipeline 2
_start_torch_memory_measurement()
UpperCAmelCase__ : int = torch.Generator(device="cpu" ).manual_seed(0 )
UpperCAmelCase__ : Optional[Any] = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(0 ) ).to(snake_case__ )
UpperCAmelCase__ : Optional[int] = floats_tensor((1, 3, 2_5_6, 2_5_6) , rng=random.Random(0 ) ).to(snake_case__ )
UpperCAmelCase__ : List[Any] = floats_tensor((1, 3, 2_5_6, 2_5_6) , rng=random.Random(1 ) ).to(snake_case__ )
        UpperCAmelCase__ : Union[str, Any] = pipe_b(
prompt_embeds=snake_case__ , negative_prompt_embeds=snake_case__ , image=snake_case__ , mask_image=snake_case__ , original_image=snake_case__ , generator=snake_case__ , num_inference_steps=2 , output_type="np" , )
UpperCAmelCase__ : Tuple = output.images[0]
assert image.shape == (2_5_6, 2_5_6, 3)
UpperCAmelCase__ : List[str] = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 1_0**9
UpperCAmelCase__ : List[Any] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy" )
assert_mean_pixel_difference(snake_case__ , snake_case__ )
def SCREAMING_SNAKE_CASE__ ( )-> Any:
'''simple docstring'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
"""simple docstring"""
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uinta
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
_lowerCAmelCase : Optional[int] = imread(r"""digital_image_processing/image_data/lena_small.jpg""")
_lowerCAmelCase : Tuple = cvtColor(img, COLOR_BGR2GRAY)
def SCREAMING_SNAKE_CASE__ ( )-> Dict:
'''simple docstring'''
UpperCAmelCase__ : int = cn.convert_to_negative(snake_case )
# assert negative_img array for at least one True
assert negative_img.any()
def SCREAMING_SNAKE_CASE__ ( )-> Any:
'''simple docstring'''
with Image.open("digital_image_processing/image_data/lena_small.jpg" ) as img:
# Work around assertion for response
assert str(cc.change_contrast(snake_case , 110 ) ).startswith(
"<PIL.Image.Image image mode=RGB size=100x100 at" )
def SCREAMING_SNAKE_CASE__ ( )-> List[str]:
'''simple docstring'''
UpperCAmelCase__ : Tuple = canny.gen_gaussian_kernel(9 , sigma=1.4 )
# Assert ambiguous array
assert resp.all()
def SCREAMING_SNAKE_CASE__ ( )-> List[Any]:
'''simple docstring'''
UpperCAmelCase__ : str = imread("digital_image_processing/image_data/lena_small.jpg" , 0 )
# assert ambiguous array for all == True
assert canny_img.all()
UpperCAmelCase__ : Tuple = canny.canny(snake_case )
# assert canny array for at least one True
assert canny_array.any()
def SCREAMING_SNAKE_CASE__ ( )-> Tuple:
'''simple docstring'''
assert gg.gaussian_filter(snake_case , 5 , sigma=0.9 ).all()
def SCREAMING_SNAKE_CASE__ ( )-> Dict:
'''simple docstring'''
UpperCAmelCase__ : List[Any] = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]] )
UpperCAmelCase__ : int = conv.img_convolve(snake_case , snake_case ).astype(snake_case )
assert res.any()
def SCREAMING_SNAKE_CASE__ ( )-> Optional[Any]:
'''simple docstring'''
assert med.median_filter(snake_case , 3 ).any()
def SCREAMING_SNAKE_CASE__ ( )-> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ : List[Any] = sob.sobel_filter(snake_case )
assert grad.any() and theta.any()
def SCREAMING_SNAKE_CASE__ ( )-> Dict:
'''simple docstring'''
UpperCAmelCase__ : List[Any] = sp.make_sepia(snake_case , 20 )
assert sepia.all()
def SCREAMING_SNAKE_CASE__ ( snake_case : str = "digital_image_processing/image_data/lena_small.jpg" )-> Optional[Any]:
'''simple docstring'''
UpperCAmelCase__ : str = bs.Burkes(imread(snake_case , 1 ) , 120 )
burkes.process()
assert burkes.output_img.any()
def SCREAMING_SNAKE_CASE__ ( snake_case : str = "digital_image_processing/image_data/lena_small.jpg" , )-> List[Any]:
'''simple docstring'''
UpperCAmelCase__ : int = rs.NearestNeighbour(imread(snake_case , 1 ) , 400 , 200 )
nn.process()
assert nn.output.any()
def SCREAMING_SNAKE_CASE__ ( )-> List[str]:
'''simple docstring'''
UpperCAmelCase__ : List[str] = "digital_image_processing/image_data/lena.jpg"
# Reading the image and converting it to grayscale.
UpperCAmelCase__ : Dict = imread(snake_case , 0 )
# Test for get_neighbors_pixel function() return not None
UpperCAmelCase__ : str = 0
UpperCAmelCase__ : str = 0
UpperCAmelCase__ : Any = image[x_coordinate][y_coordinate]
UpperCAmelCase__ : int = lbp.get_neighbors_pixel(
snake_case , snake_case , snake_case , snake_case )
assert neighbors_pixels is not None
# Test for local_binary_pattern function()
# Create a numpy array as the same height and width of read image
UpperCAmelCase__ : Optional[int] = np.zeros((image.shape[0], image.shape[1]) )
# Iterating through the image and calculating the local binary pattern value
# for each pixel.
for i in range(0 , image.shape[0] ):
for j in range(0 , image.shape[1] ):
UpperCAmelCase__ : List[Any] = lbp.local_binary_value(snake_case , snake_case , snake_case )
assert lbp_image.any()
"""simple docstring"""
import os
import re
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_lowerCAmelCase : Optional[int] = logging.get_logger(__name__)
_lowerCAmelCase : Optional[int] = {
"""vocab_file""": """vocab.txt""",
"""merges_file""": """bpe.codes""",
}
_lowerCAmelCase : List[Any] = {
"""vocab_file""": {
"""vinai/phobert-base""": """https://huggingface.co/vinai/phobert-base/resolve/main/vocab.txt""",
"""vinai/phobert-large""": """https://huggingface.co/vinai/phobert-large/resolve/main/vocab.txt""",
},
"""merges_file""": {
"""vinai/phobert-base""": """https://huggingface.co/vinai/phobert-base/resolve/main/bpe.codes""",
"""vinai/phobert-large""": """https://huggingface.co/vinai/phobert-large/resolve/main/bpe.codes""",
},
}
_lowerCAmelCase : int = {
"""vinai/phobert-base""": 256,
"""vinai/phobert-large""": 256,
}
def SCREAMING_SNAKE_CASE__ ( snake_case : List[Any] )-> str:
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = set()
UpperCAmelCase__ : Optional[int] = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
UpperCAmelCase__ : Dict = char
UpperCAmelCase__ : Tuple = set(snake_case )
return pairs
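# Illustration (added, hedged): for the symbol tuple ("w", "o", "r", "d") the
# helper above returns {("w", "o"), ("o", "r"), ("r", "d")} - exactly the
# candidate set that a BPE merge step ranks.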
class lowerCAmelCase__ ( __magic_name__ ):
SCREAMING_SNAKE_CASE_ =VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE_ =PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE_ =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self : List[Any] , snake_case__ : int , snake_case__ : Union[str, Any] , snake_case__ : Tuple="<s>" , snake_case__ : List[Any]="</s>" , snake_case__ : Union[str, Any]="</s>" , snake_case__ : Union[str, Any]="<s>" , snake_case__ : Any="<unk>" , snake_case__ : int="<pad>" , snake_case__ : List[str]="<mask>" , **snake_case__ : Optional[int] , ):
'''simple docstring'''
super().__init__(
bos_token=snake_case__ , eos_token=snake_case__ , unk_token=snake_case__ , sep_token=snake_case__ , cls_token=snake_case__ , pad_token=snake_case__ , mask_token=snake_case__ , **snake_case__ , )
UpperCAmelCase__ : Dict = vocab_file
UpperCAmelCase__ : Tuple = merges_file
UpperCAmelCase__ : List[Any] = {}
UpperCAmelCase__ : Dict = 0
UpperCAmelCase__ : int = 1
UpperCAmelCase__ : Dict = 2
UpperCAmelCase__ : Dict = 3
self.add_from_file(snake_case__ )
UpperCAmelCase__ : Optional[Any] = {v: k for k, v in self.encoder.items()}
with open(snake_case__ , encoding="utf-8" ) as merges_handle:
UpperCAmelCase__ : Tuple = merges_handle.read().split("\n" )[:-1]
UpperCAmelCase__ : Optional[Any] = [tuple(merge.split()[:-1] ) for merge in merges]
UpperCAmelCase__ : List[Any] = dict(zip(snake_case__ , range(len(snake_case__ ) ) ) )
UpperCAmelCase__ : Dict = {}
def __a ( self : int , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None ):
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
UpperCAmelCase__ : str = [self.cls_token_id]
UpperCAmelCase__ : Any = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
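    # Resulting layouts (added note, grounded in the code above):
    #   single sequence:   <s> A </s>
    #   pair of sequences: <s> A </s></s> B </s>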
def __a ( self : List[str] , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None , snake_case__ : bool = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=snake_case__ , token_ids_a=snake_case__ , already_has_special_tokens=snake_case__ )
if token_ids_a is None:
return [1] + ([0] * len(snake_case__ )) + [1]
return [1] + ([0] * len(snake_case__ )) + [1, 1] + ([0] * len(snake_case__ )) + [1]
def __a ( self : Union[str, Any] , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = [self.sep_token_id]
UpperCAmelCase__ : str = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def __a ( self : List[str] ):
'''simple docstring'''
return len(self.encoder )
def __a ( self : Any ):
'''simple docstring'''
return dict(self.encoder , **self.added_tokens_encoder )
def __a ( self : Dict , snake_case__ : Tuple ):
'''simple docstring'''
if token in self.cache:
return self.cache[token]
UpperCAmelCase__ : Optional[Any] = tuple(snake_case__ )
UpperCAmelCase__ : Optional[Any] = tuple(list(word[:-1] ) + [word[-1] + "</w>"] )
UpperCAmelCase__ : Any = get_pairs(snake_case__ )
if not pairs:
return token
while True:
UpperCAmelCase__ : List[Any] = min(snake_case__ , key=lambda snake_case__ : self.bpe_ranks.get(snake_case__ , float("inf" ) ) )
if bigram not in self.bpe_ranks:
break
UpperCAmelCase__ , UpperCAmelCase__ : Tuple = bigram
UpperCAmelCase__ : Optional[Any] = []
UpperCAmelCase__ : Tuple = 0
while i < len(snake_case__ ):
try:
UpperCAmelCase__ : Union[str, Any] = word.index(snake_case__ , snake_case__ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
UpperCAmelCase__ : Dict = j
if word[i] == first and i < len(snake_case__ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
UpperCAmelCase__ : Dict = tuple(snake_case__ )
UpperCAmelCase__ : List[Any] = new_word
if len(snake_case__ ) == 1:
break
else:
UpperCAmelCase__ : Dict = get_pairs(snake_case__ )
UpperCAmelCase__ : List[Any] = "@@ ".join(snake_case__ )
UpperCAmelCase__ : Optional[int] = word[:-4]
UpperCAmelCase__ : Union[str, Any] = word
return word
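    # Illustration (added, hedged - the segmentation below is invented): the loop
    # above greedily applies the highest-ranked merges, joins the pieces with
    # "@@ " and drops the trailing "</w>", so a rare token may come back as,
    # e.g., "un@@ avail@@ able".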
def __a ( self : List[Any] , snake_case__ : Tuple ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = []
UpperCAmelCase__ : int = re.findall(R"\S+\n?" , snake_case__ )
for token in words:
split_tokens.extend(list(self.bpe(snake_case__ ).split(" " ) ) )
return split_tokens
def __a ( self : Dict , snake_case__ : List[str] ):
'''simple docstring'''
return self.encoder.get(snake_case__ , self.encoder.get(self.unk_token ) )
def __a ( self : List[Any] , snake_case__ : Any ):
'''simple docstring'''
return self.decoder.get(snake_case__ , self.unk_token )
def __a ( self : str , snake_case__ : Tuple ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = " ".join(snake_case__ ).replace("@@ " , "" ).strip()
return out_string
def __a ( self : Any , snake_case__ : str , snake_case__ : Optional[str] = None ):
'''simple docstring'''
if not os.path.isdir(snake_case__ ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
UpperCAmelCase__ : Tuple = os.path.join(
snake_case__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
UpperCAmelCase__ : str = os.path.join(
snake_case__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case__ ):
copyfile(self.vocab_file , snake_case__ )
if os.path.abspath(self.merges_file ) != os.path.abspath(snake_case__ ):
copyfile(self.merges_file , snake_case__ )
return out_vocab_file, out_merge_file
def __a ( self : List[Any] , snake_case__ : Union[str, Any] ):
'''simple docstring'''
if isinstance(snake_case__ , snake_case__ ):
try:
with open(snake_case__ , "r" , encoding="utf-8" ) as fd:
self.add_from_file(snake_case__ )
except FileNotFoundError as fnfe:
raise fnfe
except UnicodeError:
raise Exception(f'Incorrect encoding detected in {f}, please rebuild the dataset' )
return
UpperCAmelCase__ : Dict = f.readlines()
for lineTmp in lines:
UpperCAmelCase__ : Optional[int] = lineTmp.strip()
UpperCAmelCase__ : Tuple = line.rfind(" " )
if idx == -1:
raise ValueError("Incorrect dictionary format, expected '<token> <cnt>'" )
UpperCAmelCase__ : Any = line[:idx]
UpperCAmelCase__ : str = len(self.encoder )
"""simple docstring"""
from collections import Counter
from pathlib import Path
from typing import Optional, Tuple
import yaml
class _NoDuplicateSafeLoader( yaml.SafeLoader ):
    def _check_no_duplicates_on_constructed_node( self : Union[str, Any] , node : Dict ):
        '''simple docstring'''
        keys = [self.constructed_objects[key_node] for key_node, _ in node.value]
        keys = [tuple(key ) if isinstance(key , list ) else key for key in keys]
        counter = Counter(keys )
        duplicate_keys = [key for key in counter if counter[key] > 1]
        if duplicate_keys:
            raise TypeError(f'Got duplicate yaml keys: {duplicate_keys}' )
    def construct_mapping( self : int , node : Optional[int] , deep : Tuple=False ):
        '''simple docstring'''
        mapping = super().construct_mapping(node , deep=deep )
        self._check_no_duplicates_on_constructed_node(node )
        return mapping
return mapping
def _split_yaml_from_readme( readme_content : str )-> Tuple[Optional[str], str]:
    '''simple docstring'''
    full_content = list(readme_content.splitlines() )
    if full_content and full_content[0] == "---" and "---" in full_content[1:]:
        sep_idx = full_content[1:].index("---" ) + 1
        yamlblock = "\n".join(full_content[1:sep_idx] )
        return yamlblock, "\n".join(full_content[sep_idx + 1 :] )
    return None, "\n".join(full_content )
class DatasetMetadata( dict ):
    # class attributes
    _FIELDS_WITH_DASHES ={'''train_eval_index'''} # train-eval-index in the YAML metadata
    @classmethod
    def from_readme( cls : Dict , path : Path ):
        '''simple docstring'''
        with open(path , encoding="utf-8" ) as readme_file:
            yaml_string , _ = _split_yaml_from_readme(readme_file.read() )
        if yaml_string is not None:
            return cls.from_yaml_string(yaml_string )
        else:
            return cls()
    def to_readme( self : Any , path : Path ):
        '''simple docstring'''
        if path.exists():
            with open(path , encoding="utf-8" ) as readme_file:
                readme_content = readme_file.read()
        else:
            readme_content = None
        full_content = self._to_readme(readme_content )
        with open(path , "w" , encoding="utf-8" ) as readme_file:
            readme_file.write(full_content )
    def _to_readme( self : Union[str, Any] , readme_content : Optional[str] = None ):
        '''simple docstring'''
        if readme_content is not None:
            _ , content = _split_yaml_from_readme(readme_content )
            full_content = "---\n" + self.to_yaml_string() + "---\n" + content
        else:
            full_content = "---\n" + self.to_yaml_string() + "---\n"
        return full_content
    @classmethod
    def from_yaml_string( cls : Optional[Any] , string : str ):
        '''simple docstring'''
        metadata_dict = yaml.load(string , Loader=_NoDuplicateSafeLoader ) or {}
        # Convert the YAML keys to DatasetMetadata fields
        metadata_dict = {
            (key.replace("-" , "_" ) if key.replace("-" , "_" ) in cls._FIELDS_WITH_DASHES else key): value
            for key, value in metadata_dict.items()
        }
        return cls(**metadata_dict )
    def to_yaml_string( self : List[Any] ):
        '''simple docstring'''
        return yaml.safe_dump(
            {
                (key.replace("_" , "-" ) if key in self._FIELDS_WITH_DASHES else key): value
                for key, value in self.items()
            } , sort_keys=False , allow_unicode=True , encoding="utf-8" , ).decode("utf-8" )
_lowerCAmelCase : Tuple = {
"""image-classification""": [],
"""translation""": [],
"""image-segmentation""": [],
"""fill-mask""": [],
"""automatic-speech-recognition""": [],
"""token-classification""": [],
"""sentence-similarity""": [],
"""audio-classification""": [],
"""question-answering""": [],
"""summarization""": [],
"""zero-shot-classification""": [],
"""table-to-text""": [],
"""feature-extraction""": [],
"""other""": [],
"""multiple-choice""": [],
"""text-classification""": [],
"""text-to-image""": [],
"""text2text-generation""": [],
"""zero-shot-image-classification""": [],
"""tabular-classification""": [],
"""tabular-regression""": [],
"""image-to-image""": [],
"""tabular-to-text""": [],
"""unconditional-image-generation""": [],
"""text-retrieval""": [],
"""text-to-speech""": [],
"""object-detection""": [],
"""audio-to-audio""": [],
"""text-generation""": [],
"""conversational""": [],
"""table-question-answering""": [],
"""visual-question-answering""": [],
"""image-to-text""": [],
"""reinforcement-learning""": [],
"""voice-activity-detection""": [],
"""time-series-forecasting""": [],
"""document-question-answering""": [],
}
if __name__ == "__main__":
from argparse import ArgumentParser
_lowerCAmelCase : List[str] = ArgumentParser(usage="""Validate the yaml metadata block of a README.md file.""")
ap.add_argument("""readme_filepath""")
_lowerCAmelCase : Union[str, Any] = ap.parse_args()
_lowerCAmelCase : Union[str, Any] = Path(args.readme_filepath)
_lowerCAmelCase : Dict = DatasetMetadata.from_readme(readme_filepath)
print(dataset_metadata)
dataset_metadata.to_readme(readme_filepath)
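
# --- Illustrative sketch (added by the editor, not part of the original file) ---
# A minimal usage example for the YAML front-matter splitting above; the README text is made up.
example_readme = "---\nlanguage: en\nlicense: mit\n---\n# My dataset\nSome description."
yaml_block, body = _split_yaml_from_readme(example_readme)
assert yaml_block == "language: en\nlicense: mit"
assert body.startswith("# My dataset")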
| 298
|
"""simple docstring"""
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class DDPMSchedulerState:
    common: CommonSchedulerState
    # setable values
    init_noise_sigma: jnp.ndarray
    timesteps: jnp.ndarray
    num_inference_steps: Optional[int] =None
    @classmethod
    def create( cls : Optional[int] , common : CommonSchedulerState , init_noise_sigma : jnp.ndarray , timesteps : jnp.ndarray ):
        '''simple docstring'''
        return cls(common=common , init_noise_sigma=init_noise_sigma , timesteps=timesteps )
@dataclass
class FlaxDDPMSchedulerOutput( FlaxSchedulerOutput ):
    state: DDPMSchedulerState
class FlaxDDPMScheduler( FlaxSchedulerMixin , ConfigMixin ):
    _compatibles =[e.name for e in FlaxKarrasDiffusionSchedulers]
    dtype: jnp.dtype
    @property
    def has_state( self : Union[str, Any] ):
        '''simple docstring'''
        return True
    @register_to_config
    def __init__( self : Tuple , num_train_timesteps : int = 1_0_0_0 , beta_start : float = 0.0001 , beta_end : float = 0.02 , beta_schedule : str = "linear" , trained_betas : Optional[jnp.ndarray] = None , variance_type : str = "fixed_small" , clip_sample : bool = True , prediction_type : str = "epsilon" , dtype : jnp.dtype = jnp.float32 , ):
        '''simple docstring'''
        self.dtype = dtype
    def create_state( self : Any , common : Optional[CommonSchedulerState] = None ):
        '''simple docstring'''
        if common is None:
            common = CommonSchedulerState.create(self )
        # standard deviation of the initial noise distribution
        init_noise_sigma = jnp.array(1.0 , dtype=self.dtype )
        timesteps = jnp.arange(0 , self.config.num_train_timesteps ).round()[::-1]
        return DDPMSchedulerState.create(
            common=common , init_noise_sigma=init_noise_sigma , timesteps=timesteps , )
    def scale_model_input( self : int , state : DDPMSchedulerState , sample : jnp.ndarray , timestep : Optional[int] = None ):
        '''simple docstring'''
        return sample
    def set_timesteps( self : Dict , state : DDPMSchedulerState , num_inference_steps : int , shape : Tuple = () ):
        '''simple docstring'''
        step_ratio = self.config.num_train_timesteps // num_inference_steps
        # creates integer timesteps by multiplying by ratio
        # rounding to avoid issues when num_inference_step is power of 3
        timesteps = (jnp.arange(0 , num_inference_steps ) * step_ratio).round()[::-1]
        return state.replace(
            num_inference_steps=num_inference_steps , timesteps=timesteps , )
    def _get_variance( self : List[str] , state : DDPMSchedulerState , t : int , predicted_variance : Any=None , variance_type : Union[str, Any]=None ):
        '''simple docstring'''
        alpha_prod_t = state.common.alphas_cumprod[t]
        alpha_prod_t_prev = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
        # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
        # and sample from it to get previous sample
        # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
        variance = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]
        if variance_type is None:
            variance_type = self.config.variance_type
        # hacks - were probably added for training stability
        if variance_type == "fixed_small":
            variance = jnp.clip(variance , a_min=1e-20 )
        # for rl-diffuser https://arxiv.org/abs/2205.09991
        elif variance_type == "fixed_small_log":
            variance = jnp.log(jnp.clip(variance , a_min=1e-20 ) )
        elif variance_type == "fixed_large":
            variance = state.common.betas[t]
        elif variance_type == "fixed_large_log":
            # Glide max_log
            variance = jnp.log(state.common.betas[t] )
        elif variance_type == "learned":
            return predicted_variance
        elif variance_type == "learned_range":
            min_log = variance
            max_log = state.common.betas[t]
            frac = (predicted_variance + 1) / 2
            variance = frac * max_log + (1 - frac) * min_log
        return variance
    def step( self : Dict , state : DDPMSchedulerState , model_output : jnp.ndarray , timestep : int , sample : jnp.ndarray , key : Optional[jax.random.KeyArray] = None , return_dict : bool = True , ):
        '''simple docstring'''
        t = timestep
        if key is None:
            key = jax.random.PRNGKey(0 )
        if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
            model_output , predicted_variance = jnp.split(model_output , sample.shape[1] , axis=1 )
        else:
            predicted_variance = None
        # 1. compute alphas, betas
        alpha_prod_t = state.common.alphas_cumprod[t]
        alpha_prod_t_prev = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if self.config.prediction_type == "epsilon":
UpperCAmelCase__ : Optional[int] = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif self.config.prediction_type == "sample":
UpperCAmelCase__ : List[Any] = model_output
elif self.config.prediction_type == "v_prediction":
UpperCAmelCase__ : int = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
else:
raise ValueError(
f'prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` '
" for the FlaxDDPMScheduler." )
# 3. Clip "predicted x_0"
if self.config.clip_sample:
UpperCAmelCase__ : Optional[Any] = jnp.clip(snake_case__ , -1 , 1 )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
UpperCAmelCase__ : Union[str, Any] = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
UpperCAmelCase__ : Tuple = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
UpperCAmelCase__ : Union[str, Any] = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
def random_variance():
UpperCAmelCase__ : List[str] = jax.random.split(snake_case__ , num=1 )
UpperCAmelCase__ : List[str] = jax.random.normal(snake_case__ , shape=model_output.shape , dtype=self.dtype )
return (self._get_variance(snake_case__ , snake_case__ , predicted_variance=snake_case__ ) ** 0.5) * noise
UpperCAmelCase__ : Optional[int] = jnp.where(t > 0 , random_variance() , jnp.zeros(model_output.shape , dtype=self.dtype ) )
UpperCAmelCase__ : Optional[Any] = pred_prev_sample + variance
        if not return_dict:
            return (pred_prev_sample, state)
        return FlaxDDPMSchedulerOutput(prev_sample=pred_prev_sample , state=state )
    def add_noise( self : List[Any] , state : DDPMSchedulerState , original_samples : jnp.ndarray , noise : jnp.ndarray , timesteps : jnp.ndarray , ):
        '''simple docstring'''
        return add_noise_common(state.common , original_samples , noise , timesteps )
    def get_velocity( self : Optional[int] , state : DDPMSchedulerState , sample : jnp.ndarray , noise : jnp.ndarray , timesteps : jnp.ndarray , ):
        '''simple docstring'''
        return get_velocity_common(state.common , sample , noise , timesteps )
def __len__( self : Union[str, Any] ):
'''simple docstring'''
return self.config.num_train_timesteps
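
# --- Illustrative sketch (added by the editor, not part of the original file) ---
# The posterior mean computed in `step` follows formula (7) of
# https://arxiv.org/pdf/2006.11239.pdf; a small numeric check with a made-up schedule:
example_betas = jnp.array([0.1, 0.2, 0.3])
example_alphas = 1.0 - example_betas
example_alphas_cumprod = jnp.cumprod(example_alphas)
example_t = 2
example_alpha_prod_t = example_alphas_cumprod[example_t]
example_alpha_prod_t_prev = example_alphas_cumprod[example_t - 1]
example_beta_prod_t = 1 - example_alpha_prod_t
example_beta_prod_t_prev = 1 - example_alpha_prod_t_prev
# coefficients of x_0 and x_t in the posterior mean µ_t(x_t, x_0)
example_x0_coeff = (example_alpha_prod_t_prev ** 0.5 * example_betas[example_t]) / example_beta_prod_t
example_xt_coeff = example_alphas[example_t] ** 0.5 * example_beta_prod_t_prev / example_beta_prod_t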
| 298
| 1
|
"""simple docstring"""
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union
import numpy as np
from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
def padding_tensor( sequences : List[Any] , padding_value : int , padding_side : Optional[int] , sequence_length : Optional[int] ):
    '''simple docstring'''
    if isinstance(padding_value , tuple ):
        out_tensor = np.full((len(sequences ), sequence_length, 2) , padding_value )
    else:
        out_tensor = np.full((len(sequences ), sequence_length) , padding_value )
    for i, tensor in enumerate(sequences ):
        if padding_side == "right":
            if isinstance(padding_value , tuple ):
                out_tensor[i, : len(tensor[:sequence_length] ), :2] = tensor[:sequence_length]
            else:
                out_tensor[i, : len(tensor[:sequence_length] )] = tensor[:sequence_length]
        else:
            if isinstance(padding_value , tuple ):
                out_tensor[i, -len(tensor[:sequence_length] ) :, :2] = tensor[:sequence_length]
            else:
                out_tensor[i, -len(tensor[:sequence_length] ) :] = tensor[:sequence_length]
    return out_tensor.tolist()
def is_punctuation( char : Optional[Any] )-> Any:
    '''simple docstring'''
    cp = ord(char )
    if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126):
        return True
    cat = unicodedata.category(char )
    if cat.startswith("P" ):
        return True
    return False
@dataclass
class DataCollatorForLukeTokenClassification( DataCollatorMixin ):
    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] =True
    max_length: Optional[int] =None
    pad_to_multiple_of: Optional[int] =None
    label_pad_token_id: int =-100
    return_tensors: str ="pt"
    def torch_call( self : Union[str, Any] , features : Any ):
        '''simple docstring'''
        import torch
        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
        batch = self.tokenizer.pad(
            features , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="pt" if labels is None else None , )
        if labels is None:
            return batch
        sequence_length = torch.tensor(batch["entity_ids"] ).shape[1]
        padding_side = self.tokenizer.padding_side
        if padding_side == "right":
            batch[label_name] = [
                list(label ) + [self.label_pad_token_id] * (sequence_length - len(label )) for label in labels
            ]
        else:
            batch[label_name] = [
                [self.label_pad_token_id] * (sequence_length - len(label )) + list(label ) for label in labels
            ]
        ner_tags = [feature["ner_tags"] for feature in features]
        batch["ner_tags"] = padding_tensor(ner_tags , -1 , padding_side , sequence_length )
        original_entity_spans = [feature["original_entity_spans"] for feature in features]
        batch["original_entity_spans"] = padding_tensor(original_entity_spans , (-1, -1) , padding_side , sequence_length )
        batch = {k: torch.tensor(v , dtype=torch.int64 ) for k, v in batch.items()}
        return batch
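
# --- Illustrative sketch (added by the editor, not part of the original file) ---
# How `padding_tensor` above behaves on ragged label sequences; the values are made up.
_example_ragged = [[1, 2, 3], [4]]
_example_padded = padding_tensor(_example_ragged, -1, "right", 4)
assert _example_padded == [[1, 2, 3, -1], [4, -1, -1, -1]]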
| 298
|
"""simple docstring"""
import inspect
import unittest
from transformers import DecisionTransformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import DecisionTransformerModel
from transformers.models.decision_transformer.modeling_decision_transformer import (
DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
class DecisionTransformerModelTester:
    def __init__( self : str , parent : Optional[Any] , batch_size : List[Any]=1_3 , seq_length : str=7 , act_dim : Optional[int]=6 , state_dim : Union[str, Any]=1_7 , hidden_size : Optional[Any]=2_3 , max_length : int=1_1 , is_training : Dict=True , ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.act_dim = act_dim
        self.state_dim = state_dim
        self.hidden_size = hidden_size
        self.max_length = max_length
        self.is_training = is_training
    def prepare_config_and_inputs( self : int ):
        '''simple docstring'''
        states = floats_tensor((self.batch_size, self.seq_length, self.state_dim) )
        actions = floats_tensor((self.batch_size, self.seq_length, self.act_dim) )
        rewards = floats_tensor((self.batch_size, self.seq_length, 1) )
        returns_to_go = floats_tensor((self.batch_size, self.seq_length, 1) )
        timesteps = ids_tensor((self.batch_size, self.seq_length) , vocab_size=1_0_0_0 )
        attention_mask = random_attention_mask((self.batch_size, self.seq_length) )
        config = self.get_config()
return (
config,
states,
actions,
rewards,
returns_to_go,
timesteps,
attention_mask,
)
    def get_config( self : int ):
'''simple docstring'''
return DecisionTransformerConfig(
batch_size=self.batch_size , seq_length=self.seq_length , act_dim=self.act_dim , state_dim=self.state_dim , hidden_size=self.hidden_size , max_length=self.max_length , )
    def create_and_check_model( self : Optional[Any] , config : Any , states : List[str] , actions : Tuple , rewards : Union[str, Any] , returns_to_go : List[Any] , timesteps : Any , attention_mask : Optional[int] , ):
        '''simple docstring'''
        model = DecisionTransformerModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(states , actions , rewards , returns_to_go , timesteps , attention_mask )
        self.parent.assertEqual(result.state_preds.shape , states.shape )
        self.parent.assertEqual(result.action_preds.shape , actions.shape )
        self.parent.assertEqual(result.return_preds.shape , returns_to_go.shape )
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.seq_length * 3, self.hidden_size) ) # seq length *3 as there are 3 modalities: states, returns and actions
    def prepare_config_and_inputs_for_common( self : int ):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            states,
            actions,
            rewards,
            returns_to_go,
            timesteps,
            attention_mask,
        ) = config_and_inputs
        inputs_dict = {
            "states": states,
            "actions": actions,
            "rewards": rewards,
            "returns_to_go": returns_to_go,
            "timesteps": timesteps,
            "attention_mask": attention_mask,
        }
        return config, inputs_dict
@require_torch
class DecisionTransformerModelTest( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes =(DecisionTransformerModel,) if is_torch_available() else ()
    all_generative_model_classes =()
    pipeline_model_mapping ={'''feature-extraction''': DecisionTransformerModel} if is_torch_available() else {}
    # Ignoring of a failing test from GenerationTesterMixin, as the model does not use inputs_ids
    test_generate_without_input_ids =False
    # Ignoring of a failing tests from ModelTesterMixin, as the model does not implement these features
    test_pruning =False
    test_resize_embeddings =False
    test_head_masking =False
    test_attention_outputs =False
    test_hidden_states_output =False
    test_inputs_embeds =False
    test_model_common_attributes =False
    test_gradient_checkpointing =False
    test_torchscript =False
    def setUp( self : Any ):
        '''simple docstring'''
        self.model_tester = DecisionTransformerModelTester(self )
        self.config_tester = ConfigTester(self , config_class=DecisionTransformerConfig , hidden_size=3_7 )
    def test_config( self : List[Any] ):
'''simple docstring'''
self.config_tester.run_common_tests()
    def test_model( self : int ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
@slow
    def test_model_from_pretrained( self : List[str] ):
        '''simple docstring'''
        for model_name in DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DecisionTransformerModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
    def test_forward_signature( self : List[str] ):
        '''simple docstring'''
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = [
                "states",
                "actions",
                "rewards",
                "returns_to_go",
                "timesteps",
                "attention_mask",
            ]
            self.assertListEqual(arg_names[: len(expected_arg_names )] , expected_arg_names )
@require_torch
class DecisionTransformerModelIntegrationTest( unittest.TestCase ):
    @slow
    def test_autoregressive_prediction( self : Optional[int] ):
        '''simple docstring'''
        NUM_STEPS = 2 # number of steps of autoregressive prediction we will perform
        TARGET_RETURN = 1_0 # defined by the RL environment, may be normalized
        model = DecisionTransformerModel.from_pretrained("edbeeching/decision-transformer-gym-hopper-expert" )
        model = model.to(torch_device )
        config = model.config
        torch.manual_seed(0 )
        state = torch.randn(1 , 1 , config.state_dim ).to(device=torch_device , dtype=torch.float32 ) # env.reset()
        expected_outputs = torch.tensor(
            [[0.24_2793, -0.2869_3074, 0.874_2613], [0.6781_5274, -0.0810_1085, -0.1295_2147]] , device=torch_device )
        returns_to_go = torch.tensor(TARGET_RETURN , device=torch_device , dtype=torch.float32 ).reshape(1 , 1 , 1 )
        states = state
        actions = torch.zeros(1 , 0 , config.act_dim , device=torch_device , dtype=torch.float32 )
        rewards = torch.zeros(1 , 0 , device=torch_device , dtype=torch.float32 )
        timesteps = torch.tensor(0 , device=torch_device , dtype=torch.long ).reshape(1 , 1 )
        for step in range(NUM_STEPS ):
            actions = torch.cat([actions, torch.zeros(1 , 1 , config.act_dim , device=torch_device )] , dim=1 )
            rewards = torch.cat([rewards, torch.zeros(1 , 1 , device=torch_device )] , dim=1 )
            attention_mask = torch.ones(1 , states.shape[1] ).to(dtype=torch.long , device=states.device )
            with torch.no_grad():
                state_pred , action_pred , return_pred = model(
                    states=states , actions=actions , rewards=rewards , returns_to_go=returns_to_go , timesteps=timesteps , attention_mask=attention_mask , return_dict=False , )
            self.assertEqual(action_pred.shape , actions.shape )
            self.assertTrue(torch.allclose(action_pred[0, -1] , expected_outputs[step] , atol=1e-4 ) )
            state , reward , _ , _ = ( # env.step(action)
                torch.randn(1 , 1 , config.state_dim ).to(device=torch_device , dtype=torch.float32 ),
                1.0,
                False,
                {},
            )
            actions[-1] = action_pred[0, -1]
            states = torch.cat([states, state] , dim=1 )
            pred_return = returns_to_go[0, -1] - reward
            returns_to_go = torch.cat([returns_to_go, pred_return.reshape(1 , 1 , 1 )] , dim=1 )
            timesteps = torch.cat(
                [timesteps, torch.ones((1, 1) , device=torch_device , dtype=torch.long ) * (step + 1)] , dim=1 )
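
# --- Illustrative sketch (added by the editor, not part of the original file) ---
# The integration test above grows the (states, returns_to_go) buffers along dim=1 every
# step; the same bookkeeping in miniature (assumes torch is available, as in this file):
def extend_rollout(states, new_state, returns_to_go, reward):
    states = torch.cat([states, new_state], dim=1)
    # the return-to-go shrinks by the reward just received
    pred_return = returns_to_go[0, -1] - reward
    returns_to_go = torch.cat([returns_to_go, pred_return.reshape(1, 1, 1)], dim=1)
    return states, returns_to_go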
| 298
| 1
|
"""simple docstring"""
import inspect
import unittest
from transformers import DecisionTransformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import DecisionTransformerModel
from transformers.models.decision_transformer.modeling_decision_transformer import (
DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
class lowerCAmelCase__ :
def __init__( self : str , snake_case__ : Optional[Any] , snake_case__ : List[Any]=1_3 , snake_case__ : str=7 , snake_case__ : Optional[int]=6 , snake_case__ : Union[str, Any]=1_7 , snake_case__ : Optional[Any]=2_3 , snake_case__ : int=1_1 , snake_case__ : Dict=True , ):
'''simple docstring'''
UpperCAmelCase__ : str = parent
UpperCAmelCase__ : Tuple = batch_size
UpperCAmelCase__ : Dict = seq_length
UpperCAmelCase__ : Union[str, Any] = act_dim
UpperCAmelCase__ : Dict = state_dim
UpperCAmelCase__ : Optional[Any] = hidden_size
UpperCAmelCase__ : List[str] = max_length
UpperCAmelCase__ : int = is_training
def __a ( self : int ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = floats_tensor((self.batch_size, self.seq_length, self.state_dim) )
UpperCAmelCase__ : List[Any] = floats_tensor((self.batch_size, self.seq_length, self.act_dim) )
UpperCAmelCase__ : Union[str, Any] = floats_tensor((self.batch_size, self.seq_length, 1) )
UpperCAmelCase__ : Optional[int] = floats_tensor((self.batch_size, self.seq_length, 1) )
UpperCAmelCase__ : int = ids_tensor((self.batch_size, self.seq_length) , vocab_size=1_0_0_0 )
UpperCAmelCase__ : Optional[int] = random_attention_mask((self.batch_size, self.seq_length) )
UpperCAmelCase__ : Optional[int] = self.get_config()
return (
config,
states,
actions,
rewards,
returns_to_go,
timesteps,
attention_mask,
)
def __a ( self : int ):
'''simple docstring'''
return DecisionTransformerConfig(
batch_size=self.batch_size , seq_length=self.seq_length , act_dim=self.act_dim , state_dim=self.state_dim , hidden_size=self.hidden_size , max_length=self.max_length , )
def __a ( self : Optional[Any] , snake_case__ : Any , snake_case__ : List[str] , snake_case__ : Tuple , snake_case__ : Union[str, Any] , snake_case__ : List[Any] , snake_case__ : Any , snake_case__ : Optional[int] , ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = DecisionTransformerModel(config=snake_case__ )
model.to(snake_case__ )
model.eval()
UpperCAmelCase__ : Dict = model(snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ )
self.parent.assertEqual(result.state_preds.shape , states.shape )
self.parent.assertEqual(result.action_preds.shape , actions.shape )
self.parent.assertEqual(result.return_preds.shape , returns_to_go.shape )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.seq_length * 3, self.hidden_size) ) # seq length *3 as there are 3 modelities: states, returns and actions
def __a ( self : int ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = self.prepare_config_and_inputs()
(
(
UpperCAmelCase__
) , (
UpperCAmelCase__
) , (
UpperCAmelCase__
) , (
UpperCAmelCase__
) , (
UpperCAmelCase__
) , (
UpperCAmelCase__
) , (
UpperCAmelCase__
) ,
) : Optional[int] = config_and_inputs
UpperCAmelCase__ : Optional[int] = {
"states": states,
"actions": actions,
"rewards": rewards,
"returns_to_go": returns_to_go,
"timesteps": timesteps,
"attention_mask": attention_mask,
}
return config, inputs_dict
@require_torch
class lowerCAmelCase__ ( __magic_name__ , __magic_name__ , __magic_name__ , unittest.TestCase ):
SCREAMING_SNAKE_CASE_ =(DecisionTransformerModel,) if is_torch_available() else ()
SCREAMING_SNAKE_CASE_ =()
SCREAMING_SNAKE_CASE_ ={'''feature-extraction''': DecisionTransformerModel} if is_torch_available() else {}
# Ignoring of a failing test from GenerationTesterMixin, as the model does not use inputs_ids
SCREAMING_SNAKE_CASE_ =False
# Ignoring of a failing tests from ModelTesterMixin, as the model does not implement these features
SCREAMING_SNAKE_CASE_ =False
SCREAMING_SNAKE_CASE_ =False
SCREAMING_SNAKE_CASE_ =False
SCREAMING_SNAKE_CASE_ =False
SCREAMING_SNAKE_CASE_ =False
SCREAMING_SNAKE_CASE_ =False
SCREAMING_SNAKE_CASE_ =False
SCREAMING_SNAKE_CASE_ =False
SCREAMING_SNAKE_CASE_ =False
def __a ( self : Any ):
'''simple docstring'''
UpperCAmelCase__ : Any = DecisionTransformerModelTester(self )
UpperCAmelCase__ : Union[str, Any] = ConfigTester(self , config_class=snake_case__ , hidden_size=3_7 )
def __a ( self : List[Any] ):
'''simple docstring'''
self.config_tester.run_common_tests()
def __a ( self : int ):
'''simple docstring'''
UpperCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case__ )
@slow
def __a ( self : List[str] ):
'''simple docstring'''
for model_name in DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase__ : Tuple = DecisionTransformerModel.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
def __a ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ : Dict = model_class(snake_case__ )
UpperCAmelCase__ : Union[str, Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase__ : Tuple = [*signature.parameters.keys()]
UpperCAmelCase__ : str = [
"states",
"actions",
"rewards",
"returns_to_go",
"timesteps",
"attention_mask",
]
self.assertListEqual(arg_names[: len(snake_case__ )] , snake_case__ )
@require_torch
class lowerCAmelCase__ ( unittest.TestCase ):
@slow
def __a ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = 2 # number of steps of autoregressive prediction we will perform
UpperCAmelCase__ : Tuple = 1_0 # defined by the RL environment, may be normalized
UpperCAmelCase__ : Optional[Any] = DecisionTransformerModel.from_pretrained("edbeeching/decision-transformer-gym-hopper-expert" )
UpperCAmelCase__ : Any = model.to(snake_case__ )
UpperCAmelCase__ : Optional[int] = model.config
torch.manual_seed(0 )
UpperCAmelCase__ : Optional[int] = torch.randn(1 , 1 , config.state_dim ).to(device=snake_case__ , dtype=torch.floataa ) # env.reset()
UpperCAmelCase__ : Optional[Any] = torch.tensor(
[[0.24_2793, -0.2869_3074, 0.874_2613], [0.6781_5274, -0.0810_1085, -0.1295_2147]] , device=snake_case__ )
UpperCAmelCase__ : List[str] = torch.tensor(snake_case__ , device=snake_case__ , dtype=torch.floataa ).reshape(1 , 1 , 1 )
UpperCAmelCase__ : Union[str, Any] = state
UpperCAmelCase__ : Dict = torch.zeros(1 , 0 , config.act_dim , device=snake_case__ , dtype=torch.floataa )
UpperCAmelCase__ : Any = torch.zeros(1 , 0 , device=snake_case__ , dtype=torch.floataa )
UpperCAmelCase__ : Optional[int] = torch.tensor(0 , device=snake_case__ , dtype=torch.long ).reshape(1 , 1 )
for step in range(snake_case__ ):
UpperCAmelCase__ : List[Any] = torch.cat([actions, torch.zeros(1 , 1 , config.act_dim , device=snake_case__ )] , dim=1 )
UpperCAmelCase__ : Optional[int] = torch.cat([rewards, torch.zeros(1 , 1 , device=snake_case__ )] , dim=1 )
UpperCAmelCase__ : Dict = torch.ones(1 , states.shape[1] ).to(dtype=torch.long , device=states.device )
with torch.no_grad():
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : int = model(
states=snake_case__ , actions=snake_case__ , rewards=snake_case__ , returns_to_go=snake_case__ , timesteps=snake_case__ , attention_mask=snake_case__ , return_dict=snake_case__ , )
self.assertEqual(action_pred.shape , actions.shape )
self.assertTrue(torch.allclose(action_pred[0, -1] , expected_outputs[step] , atol=1e-4 ) )
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : str = ( # env.step(action)
torch.randn(1 , 1 , config.state_dim ).to(device=snake_case__ , dtype=torch.floataa ),
1.0,
False,
{},
)
UpperCAmelCase__ : Union[str, Any] = action_pred[0, -1]
UpperCAmelCase__ : int = torch.cat([states, state] , dim=1 )
UpperCAmelCase__ : Dict = returns_to_go[0, -1] - reward
UpperCAmelCase__ : Optional[Any] = torch.cat([returns_to_go, pred_return.reshape(1 , 1 , 1 )] , dim=1 )
UpperCAmelCase__ : Tuple = torch.cat(
[timesteps, torch.ones((1, 1) , device=snake_case__ , dtype=torch.long ) * (step + 1)] , dim=1 )
| 298
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_mluke"] = ["""MLukeTokenizer"""]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mluke import MLukeTokenizer
else:
import sys
_lowerCAmelCase : List[str] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
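
# --- Illustrative sketch (added by the editor, not part of the original file) ---
# `_LazyModule` defers the heavy submodule import until first attribute access. A rough
# sketch of the idea (the real implementation lives in `transformers.utils`):
import importlib

class TinyLazyModule:
    def __init__(self, package_name, import_structure):
        self._package_name = package_name
        # maps an exported attribute name to the submodule that defines it
        self._attr_to_module = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}

    def __getattr__(self, attr):
        module = importlib.import_module("." + self._attr_to_module[attr], self._package_name)
        return getattr(module, attr)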
| 298
| 1
|
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def embeddings( idx : int ):
    '''simple docstring'''
    embed = []
embed.append(
(
f'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight',
f'stage{idx}.patch_embed.proj.weight',
) )
embed.append(
(
f'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias',
f'stage{idx}.patch_embed.proj.bias',
) )
embed.append(
(
f'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight',
f'stage{idx}.patch_embed.norm.weight',
) )
embed.append(
(
f'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias',
f'stage{idx}.patch_embed.norm.bias',
) )
return embed
def attention( idx : Tuple , cnt : int ):
    '''simple docstring'''
    attention_weights = []
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight',
f'stage{idx}.blocks.{cnt}.attn.proj_q.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias',
f'stage{idx}.blocks.{cnt}.attn.proj_q.bias',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight',
f'stage{idx}.blocks.{cnt}.attn.proj_k.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias',
f'stage{idx}.blocks.{cnt}.attn.proj_k.bias',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight',
f'stage{idx}.blocks.{cnt}.attn.proj_v.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias',
f'stage{idx}.blocks.{cnt}.attn.proj_v.bias',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight',
f'stage{idx}.blocks.{cnt}.attn.proj.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias',
f'stage{idx}.blocks.{cnt}.attn.proj.bias',
) )
attention_weights.append(
(f'cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight', f'stage{idx}.blocks.{cnt}.mlp.fc1.weight') )
attention_weights.append(
(f'cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias', f'stage{idx}.blocks.{cnt}.mlp.fc1.bias') )
attention_weights.append(
(f'cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight', f'stage{idx}.blocks.{cnt}.mlp.fc2.weight') )
attention_weights.append(
(f'cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias', f'stage{idx}.blocks.{cnt}.mlp.fc2.bias') )
attention_weights.append(
(f'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight', f'stage{idx}.blocks.{cnt}.norm1.weight') )
attention_weights.append(
(f'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias', f'stage{idx}.blocks.{cnt}.norm1.bias') )
attention_weights.append(
(f'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight', f'stage{idx}.blocks.{cnt}.norm2.weight') )
attention_weights.append(
(f'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias', f'stage{idx}.blocks.{cnt}.norm2.bias') )
return attention_weights
def cls_token( idx : Union[str, Any] ):
    '''simple docstring'''
    token = []
token.append((f'cvt.encoder.stages.{idx}.cls_token', "stage2.cls_token") )
return token
def final( ):
    '''simple docstring'''
    head = []
head.append(("layernorm.weight", "norm.weight") )
head.append(("layernorm.bias", "norm.bias") )
head.append(("classifier.weight", "head.weight") )
head.append(("classifier.bias", "head.bias") )
return head
def convert_cvt_checkpoint( cvt_model : str , image_size : str , cvt_file_name : Dict , pytorch_dump_folder_path : List[Any] ):
    '''simple docstring'''
    img_labels_file = "imagenet-1k-id2label.json"
    num_labels = 1000
    repo_id = "huggingface/label-files"
    id2label = json.load(open(cached_download(hf_hub_url(repo_id , img_labels_file , repo_type="dataset" ) ) , "r" ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    config = CvtConfig(num_labels=num_labels , id2label=id2label , label2id=label2id )
    # For depth size 13 (13 = 1+2+10)
    if cvt_model.rsplit("/" , 1 )[-1][4:6] == "13":
        config.depth = [1, 2, 10]
    # For depth size 21 (21 = 1+4+16)
    elif cvt_model.rsplit("/" , 1 )[-1][4:6] == "21":
        config.depth = [1, 4, 16]
    # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 + 20)
    else:
        config.depth = [2, 2, 20]
        config.num_heads = [3, 12, 16]
        config.embed_dim = [192, 768, 1024]
    model = CvtForImageClassification(config )
    image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k" )
    image_processor.size["shortest_edge"] = image_size
    original_weights = torch.load(cvt_file_name , map_location=torch.device("cpu" ) )
    huggingface_weights = OrderedDict()
    list_of_state_dict = []
    for idx in range(len(config.depth ) ):
        if config.cls_token[idx]:
            list_of_state_dict = list_of_state_dict + cls_token(idx )
        list_of_state_dict = list_of_state_dict + embeddings(idx )
        for cnt in range(config.depth[idx] ):
            list_of_state_dict = list_of_state_dict + attention(idx , cnt )
    list_of_state_dict = list_of_state_dict + final()
    for gg in list_of_state_dict:
        print(gg )
    for i in range(len(list_of_state_dict ) ):
        huggingface_weights[list_of_state_dict[i][0]] = original_weights[list_of_state_dict[i][1]]
    model.load_state_dict(huggingface_weights )
    model.save_pretrained(pytorch_dump_folder_path )
    image_processor.save_pretrained(pytorch_dump_folder_path )
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
_lowerCAmelCase : List[str] = argparse.ArgumentParser()
parser.add_argument(
"""--cvt_model""",
default="""cvt-w24""",
type=str,
help="""Name of the cvt model you'd like to convert.""",
)
parser.add_argument(
"""--image_size""",
default=384,
type=int,
help="""Input Image Size""",
)
parser.add_argument(
"""--cvt_file_name""",
default=r"""cvtmodels\CvT-w24-384x384-IN-22k.pth""",
type=str,
help="""Input Image Size""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
_lowerCAmelCase : Union[str, Any] = parser.parse_args()
convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
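
# --- Illustrative sketch (added by the editor, not part of the original file) ---
# The conversion above is driven by a rename map: (new_key, old_key) pairs copied from the
# original checkpoint into a fresh state dict. The same pattern in miniature:
def rename_state_dict(original_weights, rename_pairs):
    converted = OrderedDict()
    for new_key, old_key in rename_pairs:
        converted[new_key] = original_weights[old_key]
    return converted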
| 298
|
"""simple docstring"""
import os
import re
import sys
import traceback
import warnings
from pathlib import Path
from typing import Dict, Optional, Union
from uuid import uuid4
from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami
from huggingface_hub.file_download import REGEX_COMMIT_HASH
from huggingface_hub.utils import (
EntryNotFoundError,
RepositoryNotFoundError,
RevisionNotFoundError,
is_jinja_available,
)
from packaging import version
from requests import HTTPError
from .. import __version__
from .constants import (
DEPRECATED_REVISION_ARGS,
DIFFUSERS_CACHE,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
SAFETENSORS_WEIGHTS_NAME,
WEIGHTS_NAME,
)
from .import_utils import (
ENV_VARS_TRUE_VALUES,
_flax_version,
_jax_version,
_onnxruntime_version,
_torch_version,
is_flax_available,
is_onnx_available,
is_torch_available,
)
from .logging import get_logger
logger = get_logger(__name__)
MODEL_CARD_TEMPLATE_PATH = Path(__file__).parent / """model_card_template.md"""
SESSION_ID = uuid4().hex
HF_HUB_OFFLINE = os.getenv("""HF_HUB_OFFLINE""", """""").upper() in ENV_VARS_TRUE_VALUES
DISABLE_TELEMETRY = os.getenv("""DISABLE_TELEMETRY""", """""").upper() in ENV_VARS_TRUE_VALUES
HUGGINGFACE_CO_TELEMETRY = HUGGINGFACE_CO_RESOLVE_ENDPOINT + """/api/telemetry/"""
def http_user_agent( user_agent : Union[Dict, str, None] = None )-> str:
    '''simple docstring'''
    ua = f'diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}'
if DISABLE_TELEMETRY or HF_HUB_OFFLINE:
return ua + "; telemetry/off"
if is_torch_available():
ua += f'; torch/{_torch_version}'
if is_flax_available():
ua += f'; jax/{_jax_version}'
ua += f'; flax/{_flax_version}'
if is_onnx_available():
ua += f'; onnxruntime/{_onnxruntime_version}'
# CI will set this value to True
if os.environ.get("DIFFUSERS_IS_CI" , "" ).upper() in ENV_VARS_TRUE_VALUES:
ua += "; is_ci/true"
    if isinstance(user_agent , dict ):
        ua += "; " + "; ".join(f'{k}/{v}' for k, v in user_agent.items() )
    elif isinstance(user_agent , str ):
        ua += "; " + user_agent
    return ua
def get_full_repo_name( model_id : str , organization : Optional[str] = None , token : Optional[str] = None ):
    '''simple docstring'''
    if token is None:
        token = HfFolder.get_token()
    if organization is None:
        username = whoami(token )["name"]
        return f'{username}/{model_id}'
    else:
        return f'{organization}/{model_id}'
def create_model_card( args : int , model_name : List[Any] ):
    '''simple docstring'''
    if not is_jinja_available():
        raise ValueError(
            "Modelcard rendering is based on Jinja templates."
            " Please make sure to have `jinja` installed before using `create_model_card`."
            " To install it, please run `pip install Jinja2`." )
    if hasattr(args , "local_rank" ) and args.local_rank not in [-1, 0]:
        return
    hub_token = args.hub_token if hasattr(args , "hub_token" ) else None
    repo_name = get_full_repo_name(model_name , token=hub_token )
    model_card = ModelCard.from_template(
        card_data=ModelCardData( # Card metadata object that will be converted to YAML block
            language="en" , license="apache-2.0" , library_name="diffusers" , tags=[] , datasets=args.dataset_name , metrics=[] , ) , template_path=MODEL_CARD_TEMPLATE_PATH , model_name=model_name , repo_name=repo_name , dataset_name=args.dataset_name if hasattr(args , "dataset_name" ) else None , learning_rate=args.learning_rate , train_batch_size=args.train_batch_size , eval_batch_size=args.eval_batch_size , gradient_accumulation_steps=(
            args.gradient_accumulation_steps if hasattr(args , "gradient_accumulation_steps" ) else None
        ) , adam_beta1=args.adam_beta1 if hasattr(args , "adam_beta1" ) else None , adam_beta2=args.adam_beta2 if hasattr(args , "adam_beta2" ) else None , adam_weight_decay=args.adam_weight_decay if hasattr(args , "adam_weight_decay" ) else None , adam_epsilon=args.adam_epsilon if hasattr(args , "adam_epsilon" ) else None , lr_scheduler=args.lr_scheduler if hasattr(args , "lr_scheduler" ) else None , lr_warmup_steps=args.lr_warmup_steps if hasattr(args , "lr_warmup_steps" ) else None , ema_inv_gamma=args.ema_inv_gamma if hasattr(args , "ema_inv_gamma" ) else None , ema_power=args.ema_power if hasattr(args , "ema_power" ) else None , ema_max_decay=args.ema_max_decay if hasattr(args , "ema_max_decay" ) else None , mixed_precision=args.mixed_precision , )
    card_path = os.path.join(args.output_dir , "README.md" )
    model_card.save(card_path )
def extract_commit_hash( resolved_file : Optional[str] , commit_hash : Optional[str] = None ):
    '''simple docstring'''
    if resolved_file is None or commit_hash is not None:
        return commit_hash
    resolved_file = str(Path(resolved_file ).as_posix() )
    search = re.search(r"snapshots/([^/]+)/" , resolved_file )
    if search is None:
        return None
    commit_hash = search.groups()[0]
    return commit_hash if REGEX_COMMIT_HASH.match(commit_hash ) else None
# Old default cache path, potentially to be migrated.
# This logic was more or less taken from `transformers`, with the following differences:
# - Diffusers doesn't use custom environment variables to specify the cache path.
# - There is no need to migrate the cache format, just move the files to the new location.
hf_cache_home = os.path.expanduser(
    os.getenv("""HF_HOME""", os.path.join(os.getenv("""XDG_CACHE_HOME""", """~/.cache"""), """huggingface"""))
)
old_diffusers_cache = os.path.join(hf_cache_home, """diffusers""")
def move_cache( old_cache_dir : Optional[str] = None , new_cache_dir : Optional[str] = None )-> None:
    '''simple docstring'''
    if new_cache_dir is None:
        new_cache_dir = DIFFUSERS_CACHE
    if old_cache_dir is None:
        old_cache_dir = old_diffusers_cache
    old_cache_dir = Path(old_cache_dir ).expanduser()
    new_cache_dir = Path(new_cache_dir ).expanduser()
    for old_blob_path in old_cache_dir.glob("**/blobs/*" ):
        if old_blob_path.is_file() and not old_blob_path.is_symlink():
            new_blob_path = new_cache_dir / old_blob_path.relative_to(old_cache_dir )
            new_blob_path.parent.mkdir(parents=True , exist_ok=True )
            os.replace(old_blob_path , new_blob_path )
            try:
                os.symlink(new_blob_path , old_blob_path )
            except OSError:
                logger.warning(
                    "Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded." )
    # At this point, old_cache_dir contains symlinks to the new cache (it can still be used).
cache_version_file = os.path.join(DIFFUSERS_CACHE, """version_diffusers_cache.txt""")
if not os.path.isfile(cache_version_file):
    cache_version = 0
else:
    with open(cache_version_file) as f:
        try:
            cache_version = int(f.read())
        except ValueError:
            cache_version = 0
if cache_version < 1:
    old_cache_is_not_empty = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0
if old_cache_is_not_empty:
logger.warning(
"""The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your """
"""existing cached models. This is a one-time operation, you can interrupt it or run it """
"""later by calling `diffusers.utils.hub_utils.move_cache()`."""
)
try:
move_cache()
except Exception as e:
_lowerCAmelCase : Dict = """\n""".join(traceback.format_tb(e.__traceback__))
logger.error(
F"""There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease """
"""file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole """
"""message and we will do our best to help."""
)
if cache_version < 1:
try:
os.makedirs(DIFFUSERS_CACHE, exist_ok=True)
with open(cache_version_file, """w""") as f:
f.write("""1""")
except Exception:
logger.warning(
F"""There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure """
"""the directory exists and can be written to."""
)
def _add_variant( weights_name : str , variant : Optional[str] = None )-> str:
    '''simple docstring'''
    if variant is not None:
        splits = weights_name.split("." )
        splits = splits[:-1] + [variant] + splits[-1:]
        weights_name = ".".join(splits )
    return weights_name
def _get_model_file( pretrained_model_name_or_path : Tuple , *,
    weights_name : Union[str, Any] , subfolder : Optional[Any] , cache_dir : str , force_download : List[str] , proxies : Dict , resume_download : Any , local_files_only : Any , use_auth_token : Tuple , user_agent : List[str] , revision : Any , commit_hash : Optional[int]=None , ):
    '''simple docstring'''
    pretrained_model_name_or_path = str(pretrained_model_name_or_path )
    if os.path.isfile(pretrained_model_name_or_path ):
        return pretrained_model_name_or_path
    elif os.path.isdir(pretrained_model_name_or_path ):
        if os.path.isfile(os.path.join(pretrained_model_name_or_path , weights_name ) ):
            # Load from a PyTorch checkpoint
            model_file = os.path.join(pretrained_model_name_or_path , weights_name )
            return model_file
        elif subfolder is not None and os.path.isfile(
            os.path.join(pretrained_model_name_or_path , subfolder , weights_name ) ):
            model_file = os.path.join(pretrained_model_name_or_path , subfolder , weights_name )
            return model_file
        else:
            raise EnvironmentError(
                f'Error no file named {weights_name} found in directory {pretrained_model_name_or_path}.' )
    else:
        # 1. First check if deprecated way of loading from branches is used
        if (
            revision in DEPRECATED_REVISION_ARGS
            and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME)
            and version.parse(version.parse(__version__ ).base_version ) >= version.parse("0.20.0" )
        ):
            try:
                model_file = hf_hub_download(
                    pretrained_model_name_or_path , filename=_add_variant(weights_name , revision ) , cache_dir=cache_dir , force_download=force_download , proxies=proxies , resume_download=resume_download , local_files_only=local_files_only , use_auth_token=use_auth_token , user_agent=user_agent , subfolder=subfolder , revision=revision or commit_hash , )
                warnings.warn(
                    f'Loading the variant {revision} from {pretrained_model_name_or_path} via `revision=\'{revision}\'` is deprecated. Loading instead from `revision=\'main\'` with `variant={revision}`. Loading model variants via `revision=\'{revision}\'` will be removed in diffusers v1. Please use `variant=\'{revision}\'` instead.' , FutureWarning , )
                return model_file
            except: # noqa: E722
                warnings.warn(
                    f'You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision=\'{revision}\'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant=\'{revision}\'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(weights_name , revision )} file in the \'main\' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title \'{pretrained_model_name_or_path} is missing {_add_variant(weights_name , revision )}\' so that the correct variant file can be added.' , FutureWarning , )
        try:
            # 2. Load model file as usual
            model_file = hf_hub_download(
                pretrained_model_name_or_path , filename=weights_name , cache_dir=cache_dir , force_download=force_download , proxies=proxies , resume_download=resume_download , local_files_only=local_files_only , use_auth_token=use_auth_token , user_agent=user_agent , subfolder=subfolder , revision=revision or commit_hash , )
            return model_file
except RepositoryNotFoundError:
raise EnvironmentError(
f'{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier '
"listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a "
"token having permission to this repo with `use_auth_token` or log in with `huggingface-cli "
"login`." )
except RevisionNotFoundError:
raise EnvironmentError(
f'{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for '
"this model name. Check the model page at "
f'\'https://huggingface.co/{pretrained_model_name_or_path}\' for available revisions.' )
except EntryNotFoundError:
raise EnvironmentError(
f'{pretrained_model_name_or_path} does not appear to have a file named {weights_name}.' )
except HTTPError as err:
raise EnvironmentError(
f'There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}' )
except ValueError:
raise EnvironmentError(
f'We couldn\'t connect to \'{HUGGINGFACE_CO_RESOLVE_ENDPOINT}\' to load this model, couldn\'t find it'
f' in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a'
f' directory containing a file named {weights_name} or'
" \nCheckout your internet connection or see how to run the library in"
" offline mode at 'https://huggingface.co/docs/diffusers/installation#offline-mode'." )
except EnvironmentError:
raise EnvironmentError(
f'Can\'t load the model for \'{pretrained_model_name_or_path}\'. If you were trying to load it from '
"'https://huggingface.co/models', make sure you don't have a local directory with the same name. "
f'Otherwise, make sure \'{pretrained_model_name_or_path}\' is the correct path to a directory '
f'containing a file named {weights_name}' )
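# A minimal, self-contained sketch of the resolution order implemented above --
# local file, then local directory (optionally with a subfolder), then a Hub
# download. `resolve_model_file` and its parameters are illustrative names, not
# the diffusers API.
import os
from huggingface_hub import hf_hub_download

def resolve_model_file(name_or_path, weights_name, subfolder=None):
    if os.path.isfile(name_or_path):
        # the caller passed a concrete file path
        return name_or_path
    if os.path.isdir(name_or_path):
        candidate = os.path.join(name_or_path, subfolder or "", weights_name)
        if os.path.isfile(candidate):
            return candidate
        raise EnvironmentError(f"No file named {weights_name} found in {name_or_path}.")
    # otherwise treat it as a Hub repo id; hf_hub_download caches and returns a local path
    return hf_hub_download(name_or_path, filename=weights_name, subfolder=subfolder)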
| 298
| 1
|
"""simple docstring"""
from __future__ import annotations
import time
import numpy as np
_lowerCAmelCase : Union[str, Any] = [8, 5, 9, 7]
_lowerCAmelCase : Union[str, Any] = [
[2, 0, 1, 1],
[0, 1, 2, 1],
[4, 0, 0, 3],
[0, 2, 1, 0],
[1, 0, 3, 0],
]
_lowerCAmelCase : Dict = [
[3, 2, 1, 4],
[0, 2, 5, 2],
[5, 1, 0, 5],
[1, 5, 3, 0],
[3, 0, 3, 3],
]
class lowerCAmelCase__ :
def __init__( self : Optional[Any] , snake_case__ : list[int] , snake_case__ : list[list[int]] , snake_case__ : list[list[int]] , ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = claim_vector
UpperCAmelCase__ : Tuple = allocated_resources_table
UpperCAmelCase__ : List[Any] = maximum_claim_table
def __a ( self : Optional[int] ):
'''simple docstring'''
return [
sum(p_item[i] for p_item in self.__allocated_resources_table )
for i in range(len(self.__allocated_resources_table[0] ) )
]
def __a ( self : Tuple ):
'''simple docstring'''
return np.array(self.__claim_vector ) - np.array(
self.__processes_resource_summation() )
def __a ( self : Optional[int] ):
'''simple docstring'''
return [
            list(np.array(self.__maximum_claim_table[i] ) - np.array(allocated_resource ) )
for i, allocated_resource in enumerate(self.__allocated_resources_table )
]
def __a ( self : Optional[Any] ):
'''simple docstring'''
        return {self.__need().index(i ): i for i in self.__need()}
def __a ( self : Union[str, Any] , **snake_case__ : Dict ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = self.__need()
UpperCAmelCase__ : List[str] = self.__allocated_resources_table
UpperCAmelCase__ : List[Any] = self.__available_resources()
UpperCAmelCase__ : Any = self.__need_index_manager()
for kw, val in kwargs.items():
if kw and val is True:
self.__pretty_data()
print("_" * 5_0 + "\n" )
while need_list:
UpperCAmelCase__ : Dict = False
for each_need in need_list:
UpperCAmelCase__ : str = True
for index, need in enumerate(snake_case__ ):
if need > available_resources[index]:
UpperCAmelCase__ : Tuple = False
break
if execution:
UpperCAmelCase__ : Optional[Any] = True
# get the original index of the process from ind_ctrl db
for original_need_index, need_clone in need_index_manager.items():
if each_need == need_clone:
UpperCAmelCase__ : Union[str, Any] = original_need_index
print(f'Process {process_number + 1} is executing.' )
# remove the process run from stack
need_list.remove(snake_case__ )
# update available/freed resources stack
UpperCAmelCase__ : int = np.array(snake_case__ ) + np.array(
alloc_resources_table[process_number] )
print(
"Updated available resource stack for processes: "
+ " ".join([str(snake_case__ ) for x in available_resources] ) )
break
if safe:
print("The process is in a safe state.\n" )
else:
print("System in unsafe state. Aborting...\n" )
break
def __a ( self : Dict ):
'''simple docstring'''
print(" " * 9 + "Allocated Resource Table" )
for item in self.__allocated_resources_table:
print(
f'P{self.__allocated_resources_table.index(snake_case__ ) + 1}'
+ " ".join(f'{it:>8}' for it in item )
+ "\n" )
print(" " * 9 + "System Resource Table" )
for item in self.__maximum_claim_table:
print(
f'P{self.__maximum_claim_table.index(snake_case__ ) + 1}'
+ " ".join(f'{it:>8}' for it in item )
+ "\n" )
print(
"Current Usage by Active Processes: "
+ " ".join(str(snake_case__ ) for x in self.__claim_vector ) )
print(
"Initial Available Resources: "
+ " ".join(str(snake_case__ ) for x in self.__available_resources() ) )
time.sleep(1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
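# A compact sketch of the core Banker's safety test that the class above implements,
# without the printing. Assumes `available` is the free-resource vector and
# `allocated`/`need` hold one row per process; the helper name is illustrative.
def is_safe_state(available, allocated, need):
    work = list(available)
    finished = [False] * len(need)
    progressed = True
    while progressed:
        progressed = False
        for i, row in enumerate(need):
            if not finished[i] and all(r <= w for r, w in zip(row, work)):
                # process i can run to completion, then releases its allocation
                work = [w + a for w, a in zip(work, allocated[i])]
                finished[i] = True
                progressed = True
    return all(finished)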
| 298
|
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import tensorflow as tf
from transformers import AutoTokenizer, TFAutoModelForSeqaSeqLM
@require_tf
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase__ ( unittest.TestCase ):
@slow
def __a ( self : Dict ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = TFAutoModelForSeqaSeqLM.from_pretrained("google/mt5-small" )
UpperCAmelCase__ : int = AutoTokenizer.from_pretrained("google/mt5-small" )
UpperCAmelCase__ : Dict = tokenizer("Hello there" , return_tensors="tf" ).input_ids
UpperCAmelCase__ : Union[str, Any] = tokenizer("Hi I am" , return_tensors="tf" ).input_ids
UpperCAmelCase__ : Dict = model(snake_case__ , labels=snake_case__ ).loss
UpperCAmelCase__ : Optional[Any] = -tf.math.reduce_mean(snake_case__ ).numpy()
UpperCAmelCase__ : List[Any] = -21.22_8168
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 2e-4 )
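# The EXPECTED_SCORE above is the mean per-token log-likelihood of the labels under
# the model. A sketch of the same quantity computed directly from the logits
# (illustration only; assumes the standard teacher-forced cross-entropy setup):
import tensorflow as tf

def mean_token_log_likelihood(logits, labels):
    # logits: (batch, seq_len, vocab_size); labels: (batch, seq_len)
    log_probs = tf.nn.log_softmax(logits, axis=-1)
    token_log_likelihood = tf.gather(log_probs, labels, batch_dims=2)
    return tf.reduce_mean(token_log_likelihood)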
| 298
| 1
|
"""simple docstring"""
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class lowerCAmelCase__ ( __magic_name__ ):
def __a ( self : List[Any] , snake_case__ : str ):
'''simple docstring'''
with open(snake_case__ , encoding="utf-8" ) as input_file:
UpperCAmelCase__ : List[Any] = re.compile(R"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)" )
UpperCAmelCase__ : Tuple = input_file.read()
UpperCAmelCase__ : Tuple = regexp.search(snake_case__ )
return match
def __a ( self : List[str] , snake_case__ : str ):
'''simple docstring'''
with open(snake_case__ , encoding="utf-8" ) as input_file:
UpperCAmelCase__ : Union[str, Any] = re.compile(R"#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()" , re.DOTALL )
UpperCAmelCase__ : Dict = input_file.read()
# use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
UpperCAmelCase__ : int = regexp.finditer(snake_case__ )
UpperCAmelCase__ : Dict = [match for match in matches if match is not None and match.group(1 ) is not None]
return matches[0] if matches else None
def __a ( self : Any ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = Path("./datasets" )
UpperCAmelCase__ : Any = list(dataset_paths.absolute().glob("**/*.py" ) )
for dataset in dataset_files:
if self._no_encoding_on_file_open(str(snake_case__ ) ):
raise AssertionError(f'open(...) must use utf-8 encoding in {dataset}' )
def __a ( self : Dict ):
'''simple docstring'''
UpperCAmelCase__ : Any = Path("./datasets" )
UpperCAmelCase__ : int = list(dataset_paths.absolute().glob("**/*.py" ) )
for dataset in dataset_files:
if self._no_print_statements(str(snake_case__ ) ):
raise AssertionError(f'print statement found in {dataset}. Use datasets.logger/logging instead.' )
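# A tiny illustration of what the first check above enforces: every `open(...)` in a
# dataset script must pass an explicit encoding (or use a binary/write mode). The
# sample strings are hypothetical; the pattern is the one used in the test.
import re

BAD_OPEN = re.compile(r"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)")
assert BAD_OPEN.search(' open("data.txt")') is not None                # flagged
assert BAD_OPEN.search(' open("data.txt", encoding="utf-8")') is None  # accepted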
| 298
|
"""simple docstring"""
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class lowerCAmelCase__ :
def __init__( self : Optional[Any] , snake_case__ : Optional[Any] , snake_case__ : Dict=1_3 , snake_case__ : List[str]=7 , snake_case__ : Union[str, Any]=True , snake_case__ : Tuple=True , snake_case__ : Optional[int]=True , snake_case__ : Any=True , snake_case__ : Any=9_9 , snake_case__ : List[Any]=1_6 , snake_case__ : Any=3_6 , snake_case__ : Union[str, Any]=6 , snake_case__ : Tuple=6 , snake_case__ : List[str]=6 , snake_case__ : List[str]=3_7 , snake_case__ : Dict="gelu" , snake_case__ : int=0.1 , snake_case__ : List[Any]=0.1 , snake_case__ : List[str]=5_1_2 , snake_case__ : Dict=1_6 , snake_case__ : str=2 , snake_case__ : Optional[Any]=0.02 , snake_case__ : List[str]=3 , snake_case__ : Any=4 , snake_case__ : int=None , ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = parent
UpperCAmelCase__ : int = batch_size
UpperCAmelCase__ : int = seq_length
UpperCAmelCase__ : List[str] = is_training
UpperCAmelCase__ : Union[str, Any] = use_input_mask
UpperCAmelCase__ : Optional[Any] = use_token_type_ids
UpperCAmelCase__ : Any = use_labels
UpperCAmelCase__ : List[Any] = vocab_size
UpperCAmelCase__ : Any = embedding_size
UpperCAmelCase__ : List[str] = hidden_size
UpperCAmelCase__ : List[Any] = num_hidden_layers
UpperCAmelCase__ : int = num_hidden_groups
UpperCAmelCase__ : Union[str, Any] = num_attention_heads
UpperCAmelCase__ : List[str] = intermediate_size
UpperCAmelCase__ : Optional[Any] = hidden_act
UpperCAmelCase__ : List[Any] = hidden_dropout_prob
UpperCAmelCase__ : Tuple = attention_probs_dropout_prob
UpperCAmelCase__ : str = max_position_embeddings
UpperCAmelCase__ : Any = type_vocab_size
UpperCAmelCase__ : Union[str, Any] = type_sequence_label_size
UpperCAmelCase__ : Union[str, Any] = initializer_range
UpperCAmelCase__ : Tuple = num_labels
UpperCAmelCase__ : List[str] = num_choices
UpperCAmelCase__ : Union[str, Any] = scope
def __a ( self : int ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase__ : Optional[int] = None
if self.use_input_mask:
UpperCAmelCase__ : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase__ : Optional[int] = None
if self.use_token_type_ids:
UpperCAmelCase__ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCAmelCase__ : List[Any] = None
UpperCAmelCase__ : Optional[int] = None
UpperCAmelCase__ : Any = None
if self.use_labels:
UpperCAmelCase__ : int = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase__ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCAmelCase__ : Dict = ids_tensor([self.batch_size] , self.num_choices )
UpperCAmelCase__ : int = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __a ( self : Any ):
'''simple docstring'''
return AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , )
def __a ( self : List[Any] , snake_case__ : Optional[Any] , snake_case__ : List[Any] , snake_case__ : Optional[Any] , snake_case__ : Tuple , snake_case__ : List[str] , snake_case__ : List[Any] , snake_case__ : List[str] ):
'''simple docstring'''
UpperCAmelCase__ : str = AlbertModel(config=snake_case__ )
model.to(snake_case__ )
model.eval()
UpperCAmelCase__ : Union[str, Any] = model(snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ )
UpperCAmelCase__ : Optional[Any] = model(snake_case__ , token_type_ids=snake_case__ )
UpperCAmelCase__ : Optional[int] = model(snake_case__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def __a ( self : Dict , snake_case__ : List[Any] , snake_case__ : Union[str, Any] , snake_case__ : List[str] , snake_case__ : int , snake_case__ : List[Any] , snake_case__ : str , snake_case__ : int ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = AlbertForPreTraining(config=snake_case__ )
model.to(snake_case__ )
model.eval()
UpperCAmelCase__ : Dict = model(
snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ , sentence_order_label=snake_case__ , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.sop_logits.shape , (self.batch_size, config.num_labels) )
def __a ( self : Union[str, Any] , snake_case__ : Any , snake_case__ : List[str] , snake_case__ : List[str] , snake_case__ : Union[str, Any] , snake_case__ : List[Any] , snake_case__ : Optional[Any] , snake_case__ : List[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = AlbertForMaskedLM(config=snake_case__ )
model.to(snake_case__ )
model.eval()
UpperCAmelCase__ : Union[str, Any] = model(snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __a ( self : Optional[int] , snake_case__ : Optional[int] , snake_case__ : Optional[Any] , snake_case__ : List[str] , snake_case__ : Any , snake_case__ : Optional[Any] , snake_case__ : List[str] , snake_case__ : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = AlbertForQuestionAnswering(config=snake_case__ )
model.to(snake_case__ )
model.eval()
UpperCAmelCase__ : List[str] = model(
snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , start_positions=snake_case__ , end_positions=snake_case__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __a ( self : Dict , snake_case__ : Optional[Any] , snake_case__ : List[str] , snake_case__ : Optional[int] , snake_case__ : Any , snake_case__ : Any , snake_case__ : List[str] , snake_case__ : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = self.num_labels
UpperCAmelCase__ : int = AlbertForSequenceClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
UpperCAmelCase__ : int = model(snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __a ( self : str , snake_case__ : Optional[Any] , snake_case__ : str , snake_case__ : Optional[Any] , snake_case__ : Union[str, Any] , snake_case__ : Optional[int] , snake_case__ : Dict , snake_case__ : Tuple ):
'''simple docstring'''
UpperCAmelCase__ : str = self.num_labels
UpperCAmelCase__ : Any = AlbertForTokenClassification(config=snake_case__ )
model.to(snake_case__ )
model.eval()
UpperCAmelCase__ : List[str] = model(snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __a ( self : Any , snake_case__ : Optional[Any] , snake_case__ : Dict , snake_case__ : Dict , snake_case__ : int , snake_case__ : List[str] , snake_case__ : Union[str, Any] , snake_case__ : Dict ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = self.num_choices
UpperCAmelCase__ : Optional[Any] = AlbertForMultipleChoice(config=snake_case__ )
model.to(snake_case__ )
model.eval()
UpperCAmelCase__ : int = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCAmelCase__ : Any = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCAmelCase__ : Optional[Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCAmelCase__ : Tuple = model(
snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __a ( self : Union[str, Any] ):
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
UpperCAmelCase__ : Optional[int] = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class lowerCAmelCase__ ( __magic_name__ , __magic_name__ , unittest.TestCase ):
SCREAMING_SNAKE_CASE_ =(
(
AlbertModel,
AlbertForPreTraining,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertForQuestionAnswering,
)
if is_torch_available()
else ()
)
SCREAMING_SNAKE_CASE_ =(
{
'''feature-extraction''': AlbertModel,
'''fill-mask''': AlbertForMaskedLM,
'''question-answering''': AlbertForQuestionAnswering,
'''text-classification''': AlbertForSequenceClassification,
'''token-classification''': AlbertForTokenClassification,
'''zero-shot''': AlbertForSequenceClassification,
}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE_ =True
def __a ( self : Tuple , snake_case__ : int , snake_case__ : Tuple , snake_case__ : Optional[int]=False ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = super()._prepare_for_class(snake_case__ , snake_case__ , return_labels=snake_case__ )
if return_labels:
if model_class in get_values(snake_case__ ):
UpperCAmelCase__ : List[Any] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=snake_case__ )
UpperCAmelCase__ : int = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=snake_case__ )
return inputs_dict
def __a ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase__ : Dict = AlbertModelTester(self )
UpperCAmelCase__ : Any = ConfigTester(self , config_class=snake_case__ , hidden_size=3_7 )
def __a ( self : Dict ):
'''simple docstring'''
self.config_tester.run_common_tests()
def __a ( self : str ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case__ )
def __a ( self : Dict ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*snake_case__ )
def __a ( self : str ):
'''simple docstring'''
UpperCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*snake_case__ )
def __a ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*snake_case__ )
def __a ( self : Dict ):
'''simple docstring'''
UpperCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*snake_case__ )
def __a ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*snake_case__ )
def __a ( self : List[Any] ):
'''simple docstring'''
UpperCAmelCase__ : str = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
UpperCAmelCase__ : Dict = type
self.model_tester.create_and_check_model(*snake_case__ )
@slow
def __a ( self : Union[str, Any] ):
'''simple docstring'''
for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase__ : List[str] = AlbertModel.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
@require_torch
class lowerCAmelCase__ ( unittest.TestCase ):
@slow
def __a ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = AlbertModel.from_pretrained("albert-base-v2" )
UpperCAmelCase__ : Dict = torch.tensor([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] )
UpperCAmelCase__ : List[str] = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
UpperCAmelCase__ : int = model(snake_case__ , attention_mask=snake_case__ )[0]
UpperCAmelCase__ : Dict = torch.Size((1, 1_1, 7_6_8) )
self.assertEqual(output.shape , snake_case__ )
UpperCAmelCase__ : Union[str, Any] = torch.tensor(
[[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , snake_case__ , atol=1e-4 ) )
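# The integration test above pins a 3x3 slice of the hidden states to hard-coded
# values. The same regression pattern as a standalone sketch (the helper name is
# illustrative; slice and tolerance match the test):
import torch

def check_output_slice(output, expected_slice, atol=1e-4):
    actual = output[:, 1:4, 1:4]
    max_diff = (actual - expected_slice).abs().max().item()
    if not torch.allclose(actual, expected_slice, atol=atol):
        raise AssertionError(f"max diff {max_diff:.2e} exceeds atol={atol}")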
| 298
| 1
|
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCAmelCase : Optional[Any] = logging.get_logger(__name__)
def SCREAMING_SNAKE_CASE__ ( snake_case : List[str] , snake_case : Union[str, Any]=False )-> str:
'''simple docstring'''
UpperCAmelCase__ : int = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'blocks.{i}.norm1.weight', f'vit.encoder.layer.{i}.layernorm_before.weight') )
rename_keys.append((f'blocks.{i}.norm1.bias', f'vit.encoder.layer.{i}.layernorm_before.bias') )
rename_keys.append((f'blocks.{i}.attn.proj.weight', f'vit.encoder.layer.{i}.attention.output.dense.weight') )
rename_keys.append((f'blocks.{i}.attn.proj.bias', f'vit.encoder.layer.{i}.attention.output.dense.bias') )
rename_keys.append((f'blocks.{i}.norm2.weight', f'vit.encoder.layer.{i}.layernorm_after.weight') )
rename_keys.append((f'blocks.{i}.norm2.bias', f'vit.encoder.layer.{i}.layernorm_after.bias') )
rename_keys.append((f'blocks.{i}.mlp.fc1.weight', f'vit.encoder.layer.{i}.intermediate.dense.weight') )
rename_keys.append((f'blocks.{i}.mlp.fc1.bias', f'vit.encoder.layer.{i}.intermediate.dense.bias') )
rename_keys.append((f'blocks.{i}.mlp.fc2.weight', f'vit.encoder.layer.{i}.output.dense.weight') )
rename_keys.append((f'blocks.{i}.mlp.fc2.bias', f'vit.encoder.layer.{i}.output.dense.bias') )
# projection layer + position embeddings
rename_keys.extend(
[
("cls_token", "vit.embeddings.cls_token"),
("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
("pos_embed", "vit.embeddings.position_embeddings"),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
("pre_logits.fc.weight", "pooler.dense.weight"),
("pre_logits.fc.bias", "pooler.dense.bias"),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
UpperCAmelCase__ : Optional[int] = [(pair[0], pair[1][4:]) if pair[1].startswith("vit" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("norm.weight", "vit.layernorm.weight"),
("norm.bias", "vit.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
return rename_keys
def SCREAMING_SNAKE_CASE__ ( snake_case : Union[str, Any] , snake_case : int , snake_case : Optional[Any]=False )-> Any:
'''simple docstring'''
for i in range(config.num_hidden_layers ):
if base_model:
UpperCAmelCase__ : Any = ""
else:
UpperCAmelCase__ : Union[str, Any] = "vit."
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
UpperCAmelCase__ : Optional[int] = state_dict.pop(f'blocks.{i}.attn.qkv.weight' )
UpperCAmelCase__ : Optional[int] = state_dict.pop(f'blocks.{i}.attn.qkv.bias' )
# next, add query, keys and values (in that order) to the state dict
UpperCAmelCase__ : Optional[int] = in_proj_weight[
: config.hidden_size, :
]
UpperCAmelCase__ : Dict = in_proj_bias[: config.hidden_size]
UpperCAmelCase__ : Dict = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
UpperCAmelCase__ : Optional[int] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
UpperCAmelCase__ : Any = in_proj_weight[
-config.hidden_size :, :
]
UpperCAmelCase__ : Union[str, Any] = in_proj_bias[-config.hidden_size :]
def SCREAMING_SNAKE_CASE__ ( snake_case : List[str] )-> Dict:
'''simple docstring'''
UpperCAmelCase__ : Tuple = ["head.weight", "head.bias"]
for k in ignore_keys:
        state_dict.pop(k , None )
def SCREAMING_SNAKE_CASE__ ( snake_case : int , snake_case : Union[str, Any] , snake_case : int )-> int:
'''simple docstring'''
UpperCAmelCase__ : Any = dct.pop(snake_case )
UpperCAmelCase__ : Tuple = val
def SCREAMING_SNAKE_CASE__ ( )-> List[Any]:
'''simple docstring'''
UpperCAmelCase__ : int = "http://images.cocodataset.org/val2017/000000039769.jpg"
UpperCAmelCase__ : int = Image.open(requests.get(snake_case , stream=snake_case ).raw )
return im
@torch.no_grad()
def SCREAMING_SNAKE_CASE__ ( snake_case : List[str] , snake_case : Optional[int] )-> List[str]:
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = ViTConfig()
UpperCAmelCase__ : int = False
# dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
if vit_name[-5:] == "in21k":
UpperCAmelCase__ : Tuple = True
UpperCAmelCase__ : Optional[int] = int(vit_name[-12:-10] )
UpperCAmelCase__ : List[str] = int(vit_name[-9:-6] )
else:
UpperCAmelCase__ : Optional[int] = 1000
UpperCAmelCase__ : List[Any] = "huggingface/label-files"
UpperCAmelCase__ : Optional[Any] = "imagenet-1k-id2label.json"
UpperCAmelCase__ : Union[str, Any] = json.load(open(hf_hub_download(snake_case , snake_case , repo_type="dataset" ) , "r" ) )
UpperCAmelCase__ : Optional[Any] = {int(snake_case ): v for k, v in idalabel.items()}
UpperCAmelCase__ : List[Any] = idalabel
UpperCAmelCase__ : List[str] = {v: k for k, v in idalabel.items()}
UpperCAmelCase__ : Tuple = int(vit_name[-6:-4] )
UpperCAmelCase__ : Union[str, Any] = int(vit_name[-3:] )
# size of the architecture
if "deit" in vit_name:
if vit_name[9:].startswith("tiny" ):
UpperCAmelCase__ : List[Any] = 192
UpperCAmelCase__ : Dict = 768
UpperCAmelCase__ : Tuple = 12
UpperCAmelCase__ : Tuple = 3
elif vit_name[9:].startswith("small" ):
UpperCAmelCase__ : List[str] = 384
UpperCAmelCase__ : Dict = 1536
UpperCAmelCase__ : Union[str, Any] = 12
UpperCAmelCase__ : Any = 6
else:
pass
else:
if vit_name[4:].startswith("small" ):
UpperCAmelCase__ : str = 768
UpperCAmelCase__ : Tuple = 2304
UpperCAmelCase__ : Dict = 8
UpperCAmelCase__ : List[str] = 8
elif vit_name[4:].startswith("base" ):
pass
elif vit_name[4:].startswith("large" ):
UpperCAmelCase__ : Union[str, Any] = 1024
UpperCAmelCase__ : Union[str, Any] = 4096
UpperCAmelCase__ : List[str] = 24
UpperCAmelCase__ : List[Any] = 16
elif vit_name[4:].startswith("huge" ):
UpperCAmelCase__ : str = 1280
UpperCAmelCase__ : Tuple = 5120
UpperCAmelCase__ : Any = 32
UpperCAmelCase__ : Any = 16
# load original model from timm
UpperCAmelCase__ : List[str] = timm.create_model(snake_case , pretrained=snake_case )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
UpperCAmelCase__ : str = timm_model.state_dict()
if base_model:
remove_classification_head_(snake_case )
UpperCAmelCase__ : Optional[Any] = create_rename_keys(snake_case , snake_case )
for src, dest in rename_keys:
rename_key(snake_case , snake_case , snake_case )
read_in_q_k_v(snake_case , snake_case , snake_case )
# load HuggingFace model
if vit_name[-5:] == "in21k":
UpperCAmelCase__ : Union[str, Any] = ViTModel(snake_case ).eval()
else:
UpperCAmelCase__ : Union[str, Any] = ViTForImageClassification(snake_case ).eval()
model.load_state_dict(snake_case )
# Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor
if "deit" in vit_name:
UpperCAmelCase__ : str = DeiTImageProcessor(size=config.image_size )
else:
UpperCAmelCase__ : str = ViTImageProcessor(size=config.image_size )
UpperCAmelCase__ : Union[str, Any] = image_processor(images=prepare_img() , return_tensors="pt" )
UpperCAmelCase__ : Optional[int] = encoding["pixel_values"]
UpperCAmelCase__ : Optional[Any] = model(snake_case )
if base_model:
UpperCAmelCase__ : Dict = timm_model.forward_features(snake_case )
assert timm_pooled_output.shape == outputs.pooler_output.shape
assert torch.allclose(snake_case , outputs.pooler_output , atol=1E-3 )
else:
UpperCAmelCase__ : Any = timm_model(snake_case )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(snake_case , outputs.logits , atol=1E-3 )
Path(snake_case ).mkdir(exist_ok=snake_case )
print(f'Saving model {vit_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(snake_case )
print(f'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(snake_case )
if __name__ == "__main__":
_lowerCAmelCase : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--vit_name""",
default="""vit_base_patch16_224""",
type=str,
help="""Name of the ViT timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
_lowerCAmelCase : Tuple = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
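# A compact sketch of the q/k/v split performed by read_in_q_k_v above: timm fuses
# attention into a single (3 * hidden, hidden) matrix, while the HF ViT layers expect
# three separate (hidden, hidden) projections. Random weights for illustration:
import torch

hidden = 8
qkv_weight = torch.randn(3 * hidden, hidden)
qkv_bias = torch.randn(3 * hidden)
q_w, k_w, v_w = qkv_weight[:hidden], qkv_weight[hidden : 2 * hidden], qkv_weight[-hidden:]
q_b, k_b, v_b = qkv_bias[:hidden], qkv_bias[hidden : 2 * hidden], qkv_bias[-hidden:]
assert q_w.shape == k_w.shape == v_w.shape == (hidden, hidden)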
| 298
|
"""simple docstring"""
def SCREAMING_SNAKE_CASE__ ( k : int , n : int )-> list:
    '''simple docstring'''
    factorials = [1]
    for i in range(2 , n ):
        factorials.append(factorials[-1] * i )
    assert 0 <= k < factorials[-1] * n, "k out of bounds"
    permutation = []
    elements = list(range(n ) )
    # Find permutation
    while factorials:
        factorial = factorials.pop()
        number , k = divmod(k , factorial )
        permutation.append(elements[number] )
        elements.remove(elements[number] )
    permutation.append(elements[0] )
    return permutation
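# Worked example of the factorial-number-system decomposition above for k = 4, n = 3:
# factorials = [1, 2]; divmod(4, 2) = (2, 0) picks index 2 of [0, 1, 2];
# divmod(0, 1) = (0, 0) picks index 0 of [0, 1]; the last remaining element is
# appended, giving [2, 0, 1].
assert SCREAMING_SNAKE_CASE__(4, 3) == [2, 0, 1]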
if __name__ == "__main__":
import doctest
doctest.testmod()
| 298
| 1
|
"""simple docstring"""
def solution( n : int = 400_0000 )-> int:
    '''simple docstring'''
    even_fibs = []
    a , b = 0, 1
    while b <= n:
        if b % 2 == 0:
            even_fibs.append(b )
        a , b = b, a + b
    return sum(even_fibs )
if __name__ == "__main__":
print(F"""{solution() = }""")
| 298
|
"""simple docstring"""
import unittest
import numpy as np
import requests
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
_lowerCAmelCase : Union[str, Any] = False
if is_vision_available():
from PIL import Image
from transformers import PixaStructImageProcessor
class lowerCAmelCase__ ( unittest.TestCase ):
def __init__( self : Dict , snake_case__ : Optional[int] , snake_case__ : List[str]=7 , snake_case__ : int=3 , snake_case__ : Any=1_8 , snake_case__ : List[Any]=3_0 , snake_case__ : int=4_0_0 , snake_case__ : Dict=None , snake_case__ : Optional[Any]=True , snake_case__ : List[str]=True , snake_case__ : Optional[Any]=None , ):
'''simple docstring'''
UpperCAmelCase__ : Dict = size if size is not None else {"height": 2_0, "width": 2_0}
UpperCAmelCase__ : List[str] = parent
UpperCAmelCase__ : List[str] = batch_size
UpperCAmelCase__ : Optional[Any] = num_channels
UpperCAmelCase__ : Any = image_size
UpperCAmelCase__ : int = min_resolution
UpperCAmelCase__ : Tuple = max_resolution
UpperCAmelCase__ : Optional[int] = size
UpperCAmelCase__ : Optional[int] = do_normalize
UpperCAmelCase__ : str = do_convert_rgb
UpperCAmelCase__ : Dict = [5_1_2, 1_0_2_4, 2_0_4_8, 4_0_9_6]
UpperCAmelCase__ : Union[str, Any] = patch_size if patch_size is not None else {"height": 1_6, "width": 1_6}
def __a ( self : str ):
'''simple docstring'''
return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}
def __a ( self : Any ):
'''simple docstring'''
UpperCAmelCase__ : Any = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg"
UpperCAmelCase__ : List[str] = Image.open(requests.get(snake_case__ , stream=snake_case__ ).raw ).convert("RGB" )
return raw_image
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason='''`Pix2StructImageProcessor` requires `torch>=1.11.0`.''' , )
@require_torch
@require_vision
class lowerCAmelCase__ ( __magic_name__ , unittest.TestCase ):
SCREAMING_SNAKE_CASE_ =PixaStructImageProcessor if is_vision_available() else None
def __a ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase__ : int = PixaStructImageProcessingTester(self )
@property
def __a ( self : Optional[int] ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def __a ( self : str ):
'''simple docstring'''
UpperCAmelCase__ : int = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(snake_case__ , "do_normalize" ) )
self.assertTrue(hasattr(snake_case__ , "do_convert_rgb" ) )
def __a ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = self.image_processor_tester.prepare_dummy_image()
UpperCAmelCase__ : Any = self.image_processing_class(**self.image_processor_dict )
UpperCAmelCase__ : Dict = 2_0_4_8
UpperCAmelCase__ : int = image_processor(snake_case__ , return_tensors="pt" , max_patches=snake_case__ )
self.assertTrue(torch.allclose(inputs.flattened_patches.mean() , torch.tensor(0.0606 ) , atol=1e-3 , rtol=1e-3 ) )
def __a ( self : List[Any] ):
'''simple docstring'''
# Initialize image_processor
UpperCAmelCase__ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCAmelCase__ : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case__ )
for image in image_inputs:
self.assertIsInstance(snake_case__ , Image.Image )
# Test not batched input
UpperCAmelCase__ : int = (
(self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
UpperCAmelCase__ : List[Any] = image_processor(
image_inputs[0] , return_tensors="pt" , max_patches=snake_case__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
UpperCAmelCase__ : str = image_processor(
snake_case__ , return_tensors="pt" , max_patches=snake_case__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def __a ( self : List[Any] ):
'''simple docstring'''
# Initialize image_processor
UpperCAmelCase__ : str = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCAmelCase__ : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case__ )
for image in image_inputs:
self.assertIsInstance(snake_case__ , Image.Image )
# Test not batched input
UpperCAmelCase__ : int = (
(self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
* self.image_processor_tester.num_channels
) + 2
UpperCAmelCase__ : Optional[int] = True
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
with self.assertRaises(snake_case__ ):
UpperCAmelCase__ : List[Any] = image_processor(
image_inputs[0] , return_tensors="pt" , max_patches=snake_case__ ).flattened_patches
UpperCAmelCase__ : Optional[Any] = "Hello"
UpperCAmelCase__ : int = image_processor(
image_inputs[0] , return_tensors="pt" , max_patches=snake_case__ , header_text=snake_case__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
UpperCAmelCase__ : Dict = image_processor(
snake_case__ , return_tensors="pt" , max_patches=snake_case__ , header_text=snake_case__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def __a ( self : Dict ):
'''simple docstring'''
# Initialize image_processor
UpperCAmelCase__ : Any = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCAmelCase__ : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case__ , numpify=snake_case__ )
for image in image_inputs:
self.assertIsInstance(snake_case__ , np.ndarray )
UpperCAmelCase__ : int = (
(self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
UpperCAmelCase__ : Dict = image_processor(
image_inputs[0] , return_tensors="pt" , max_patches=snake_case__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
UpperCAmelCase__ : List[str] = image_processor(
snake_case__ , return_tensors="pt" , max_patches=snake_case__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def __a ( self : Optional[int] ):
'''simple docstring'''
# Initialize image_processor
UpperCAmelCase__ : int = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCAmelCase__ : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case__ , torchify=snake_case__ )
for image in image_inputs:
self.assertIsInstance(snake_case__ , torch.Tensor )
# Test not batched input
UpperCAmelCase__ : int = (
(self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
UpperCAmelCase__ : int = image_processor(
image_inputs[0] , return_tensors="pt" , max_patches=snake_case__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
UpperCAmelCase__ : str = image_processor(
snake_case__ , return_tensors="pt" , max_patches=snake_case__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason='''`Pix2StructImageProcessor` requires `torch>=1.11.0`.''' , )
@require_torch
@require_vision
class lowerCAmelCase__ ( __magic_name__ , unittest.TestCase ):
SCREAMING_SNAKE_CASE_ =PixaStructImageProcessor if is_vision_available() else None
def __a ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = PixaStructImageProcessingTester(self , num_channels=4 )
UpperCAmelCase__ : Optional[int] = 3
@property
def __a ( self : int ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def __a ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(snake_case__ , "do_normalize" ) )
self.assertTrue(hasattr(snake_case__ , "do_convert_rgb" ) )
def __a ( self : int ):
'''simple docstring'''
# Initialize image_processor
UpperCAmelCase__ : str = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCAmelCase__ : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case__ )
for image in image_inputs:
self.assertIsInstance(snake_case__ , Image.Image )
# Test not batched input
UpperCAmelCase__ : str = (
(self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
* (self.image_processor_tester.num_channels - 1)
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
UpperCAmelCase__ : Optional[int] = image_processor(
image_inputs[0] , return_tensors="pt" , max_patches=snake_case__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
UpperCAmelCase__ : Dict = image_processor(
snake_case__ , return_tensors="pt" , max_patches=snake_case__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
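# The expected_hidden_dim used throughout these tests is
# patch_height * patch_width * num_channels + 2: each flattened patch carries its raw
# pixel values plus a (row, column) position pair. For the default 16x16 RGB patches:
patch_h, patch_w, channels = 16, 16, 3
assert patch_h * patch_w * channels + 2 == 770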
| 298
| 1
|
"""simple docstring"""
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse("""0.12.2"""):
raise Exception("""requires fairseq >= 0.12.2""")
if version.parse(fairseq.__version__) > version.parse("""2"""):
raise Exception("""requires fairseq < v2""")
logging.set_verbosity_info()
_lowerCAmelCase : Optional[int] = logging.get_logger(__name__)
_lowerCAmelCase : Any = """Hello, World!"""
_lowerCAmelCase : List[Any] = """en_XX"""
def SCREAMING_SNAKE_CASE__ ( snake_case : str , snake_case : str , snake_case : bool )-> Tuple:
'''simple docstring'''
UpperCAmelCase__ : Any = Path("data_bin" )
UpperCAmelCase__ : int = FairseqXmodModel.from_pretrained(
model_name_or_path=str(Path(snake_case ).parent ) , checkpoint_file=Path(snake_case ).name , _name="xmod_base" , arch="xmod_base" , task="multilingual_masked_lm" , data_name_or_path=str(snake_case ) , bpe="sentencepiece" , sentencepiece_model=str(Path(snake_case ).parent / "sentencepiece.bpe.model" ) , src_dict=str(data_dir / "dict.txt" ) , )
xmod.eval() # disable dropout
print(snake_case )
UpperCAmelCase__ : Any = xmod.model.encoder.sentence_encoder
UpperCAmelCase__ : List[str] = XmodConfig(
vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings , hidden_size=xmod.cfg.model.encoder_embed_dim , num_hidden_layers=xmod.cfg.model.encoder_layers , num_attention_heads=xmod.cfg.model.encoder_attention_heads , intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=514 , type_vocab_size=1 , layer_norm_eps=1E-5 , pre_norm=xmod.cfg.model.encoder_normalize_before , adapter_reduction_factor=getattr(xmod.cfg.model , "bottleneck" , 2 ) , adapter_layer_norm=xmod.cfg.model.adapter_layer_norm , adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm , ln_before_adapter=xmod.cfg.model.ln_before_adapter , languages=xmod.cfg.model.languages , )
if classification_head:
UpperCAmelCase__ : List[str] = xmod.model.classification_heads["mnli"].out_proj.weight.shape[0]
print("Our X-MOD config:" , snake_case )
UpperCAmelCase__ : str = XmodForSequenceClassification(snake_case ) if classification_head else XmodForMaskedLM(snake_case )
model.eval()
# Now let's copy all the weights.
# Embeddings
UpperCAmelCase__ : int = xmod_sent_encoder.embed_tokens.weight
UpperCAmelCase__ : int = xmod_sent_encoder.embed_positions.weight
UpperCAmelCase__ : Any = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c xmod doesn't use them.
UpperCAmelCase__ : str = xmod_sent_encoder.layernorm_embedding.weight
UpperCAmelCase__ : Optional[Any] = xmod_sent_encoder.layernorm_embedding.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
UpperCAmelCase__ : str = model.roberta.encoder.layer[i]
UpperCAmelCase__ : Dict = xmod_sent_encoder.layers[i]
# self attention
UpperCAmelCase__ : Any = layer.attention.self
if not (
xmod_layer.self_attn.k_proj.weight.data.shape
== xmod_layer.self_attn.q_proj.weight.data.shape
== xmod_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
):
raise AssertionError("Dimensions of self-attention weights do not match." )
UpperCAmelCase__ : Optional[int] = xmod_layer.self_attn.q_proj.weight
UpperCAmelCase__ : int = xmod_layer.self_attn.q_proj.bias
UpperCAmelCase__ : int = xmod_layer.self_attn.k_proj.weight
UpperCAmelCase__ : Optional[Any] = xmod_layer.self_attn.k_proj.bias
UpperCAmelCase__ : Dict = xmod_layer.self_attn.v_proj.weight
UpperCAmelCase__ : Any = xmod_layer.self_attn.v_proj.bias
# self-attention output
UpperCAmelCase__ : int = layer.attention.output
if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
raise AssertionError("Dimensions of self-attention output weights do not match." )
UpperCAmelCase__ : Optional[int] = xmod_layer.self_attn.out_proj.weight
UpperCAmelCase__ : List[str] = xmod_layer.self_attn.out_proj.bias
UpperCAmelCase__ : int = xmod_layer.self_attn_layer_norm.weight
UpperCAmelCase__ : Dict = xmod_layer.self_attn_layer_norm.bias
# intermediate
UpperCAmelCase__ : Tuple = layer.intermediate
if intermediate.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError("Dimensions of intermediate weights do not match." )
UpperCAmelCase__ : str = xmod_layer.fca.weight
UpperCAmelCase__ : List[Any] = xmod_layer.fca.bias
# output
UpperCAmelCase__ : Union[str, Any] = layer.output
if bert_output.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError("Dimensions of feed-forward weights do not match." )
UpperCAmelCase__ : Optional[int] = xmod_layer.fca.weight
UpperCAmelCase__ : str = xmod_layer.fca.bias
UpperCAmelCase__ : int = xmod_layer.final_layer_norm.weight
UpperCAmelCase__ : Optional[int] = xmod_layer.final_layer_norm.bias
if bert_output.adapter_layer_norm is not None:
UpperCAmelCase__ : List[str] = xmod_layer.adapter_layer_norm.weight
UpperCAmelCase__ : Optional[Any] = xmod_layer.adapter_layer_norm.bias
if sorted(bert_output.adapter_modules.keys() ) != sorted(xmod_layer.adapter_modules.keys() ):
raise AssertionError("Lists of language adapters do not match." )
for lang_code, adapter in xmod_layer.adapter_modules.items():
UpperCAmelCase__ : List[str] = bert_output.adapter_modules[lang_code]
UpperCAmelCase__ : Optional[Any] = xmod_layer.adapter_modules[lang_code]
UpperCAmelCase__ : List[Any] = from_adapter.fca.weight
UpperCAmelCase__ : List[str] = from_adapter.fca.bias
UpperCAmelCase__ : Optional[int] = from_adapter.fca.weight
UpperCAmelCase__ : Tuple = from_adapter.fca.bias
# end of layer
if xmod_sent_encoder.layer_norm is not None:
UpperCAmelCase__ : Union[str, Any] = xmod_sent_encoder.layer_norm.weight
UpperCAmelCase__ : Tuple = xmod_sent_encoder.layer_norm.bias
if classification_head:
UpperCAmelCase__ : Union[str, Any] = xmod.model.classification_heads["mnli"].dense.weight
UpperCAmelCase__ : int = xmod.model.classification_heads["mnli"].dense.bias
UpperCAmelCase__ : Tuple = xmod.model.classification_heads["mnli"].out_proj.weight
UpperCAmelCase__ : str = xmod.model.classification_heads["mnli"].out_proj.bias
else:
# LM Head
UpperCAmelCase__ : str = xmod.model.encoder.lm_head.dense.weight
UpperCAmelCase__ : Optional[int] = xmod.model.encoder.lm_head.dense.bias
UpperCAmelCase__ : List[Any] = xmod.model.encoder.lm_head.layer_norm.weight
UpperCAmelCase__ : int = xmod.model.encoder.lm_head.layer_norm.bias
UpperCAmelCase__ : List[str] = xmod.model.encoder.lm_head.weight
UpperCAmelCase__ : Any = xmod.model.encoder.lm_head.bias
# Let's check that we get the same results.
UpperCAmelCase__ : Union[str, Any] = xmod.encode(snake_case ).unsqueeze(0 ) # batch of size 1
model.roberta.set_default_language(snake_case )
UpperCAmelCase__ : Union[str, Any] = model(snake_case )[0]
if classification_head:
UpperCAmelCase__ : Dict = xmod.model.classification_heads["mnli"](xmod.extract_features(snake_case ) )
else:
UpperCAmelCase__ : str = xmod.model(snake_case , lang_id=[SAMPLE_LANGUAGE] )[0]
print(our_output.shape , their_output.shape )
UpperCAmelCase__ : Optional[Any] = torch.max(torch.abs(our_output - their_output ) ).item()
print(f'max_absolute_diff = {max_absolute_diff}' ) # ~ 1e-7
UpperCAmelCase__ : int = torch.allclose(snake_case , snake_case , atol=1E-3 )
print("Do both models output the same tensors?" , "🔥" if success else "💩" )
if not success:
raise Exception("Something went wRoNg" )
Path(snake_case ).mkdir(parents=snake_case , exist_ok=snake_case )
print(f'Saving model to {pytorch_dump_folder_path}' )
model.save_pretrained(snake_case )
if __name__ == "__main__":
_lowerCAmelCase : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--xmod_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--classification_head""", action="""store_true""", help="""Whether to convert a final classification head."""
)
_lowerCAmelCase : Tuple = parser.parse_args()
convert_xmod_checkpoint_to_pytorch(
args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
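# The verification above reduces to a generic pattern for checking that a converted
# checkpoint reproduces the source model. A sketch with an illustrative helper name:
import torch

def assert_models_agree(our_output, their_output, atol=1e-3):
    max_absolute_diff = torch.max(torch.abs(our_output - their_output)).item()
    print(f"max_absolute_diff = {max_absolute_diff}")
    if not torch.allclose(our_output, their_output, atol=atol):
        raise Exception("Outputs diverge beyond tolerance")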
| 298
|
"""simple docstring"""
import importlib
import os
import fsspec
import pytest
from fsspec import register_implementation
from fsspec.registry import _registry as _fsspec_registry
from datasets.filesystems import COMPRESSION_FILESYSTEMS, HfFileSystem, extract_path_from_uri, is_remote_filesystem
from .utils import require_lza, require_zstandard
def SCREAMING_SNAKE_CASE__ ( snake_case : Optional[Any] )-> Any:
'''simple docstring'''
assert "mock" in _fsspec_registry
assert "bz2" in _fsspec_registry
def SCREAMING_SNAKE_CASE__ ( )-> List[Any]:
'''simple docstring'''
assert "mock" not in _fsspec_registry
assert "bz2" in _fsspec_registry
def SCREAMING_SNAKE_CASE__ ( )-> Optional[int]:
'''simple docstring'''
UpperCAmelCase__ : int = "mock-s3-bucket"
UpperCAmelCase__ : Any = f's3://{mock_bucket}'
UpperCAmelCase__ : Tuple = extract_path_from_uri(snake_case )
assert dataset_path.startswith("s3://" ) is False
UpperCAmelCase__ : str = "./local/path"
UpperCAmelCase__ : Union[str, Any] = extract_path_from_uri(snake_case )
assert dataset_path == new_dataset_path
def SCREAMING_SNAKE_CASE__ ( snake_case : Any )-> str:
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = is_remote_filesystem(snake_case )
assert is_remote is True
UpperCAmelCase__ : str = fsspec.filesystem("file" )
UpperCAmelCase__ : Optional[Any] = is_remote_filesystem(snake_case )
assert is_remote is False
@pytest.mark.parametrize("compression_fs_class" , snake_case )
def SCREAMING_SNAKE_CASE__ ( snake_case : Union[str, Any] , snake_case : Any , snake_case : List[str] , snake_case : Optional[int] , snake_case : Union[str, Any] , snake_case : Optional[Any] , snake_case : int )-> int:
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_file, "bz2": bza_file, "lz4": lza_file}
UpperCAmelCase__ : Dict = input_paths[compression_fs_class.protocol]
if input_path is None:
UpperCAmelCase__ : Optional[Any] = f'for \'{compression_fs_class.protocol}\' compression protocol, '
if compression_fs_class.protocol == "lz4":
reason += require_lza.kwargs["reason"]
elif compression_fs_class.protocol == "zstd":
reason += require_zstandard.kwargs["reason"]
pytest.skip(snake_case )
UpperCAmelCase__ : Optional[Any] = fsspec.filesystem(compression_fs_class.protocol , fo=snake_case )
assert isinstance(snake_case , snake_case )
UpperCAmelCase__ : Union[str, Any] = os.path.basename(snake_case )
UpperCAmelCase__ : Optional[int] = expected_filename[: expected_filename.rindex("." )]
assert fs.glob("*" ) == [expected_filename]
with fs.open(snake_case , "r" , encoding="utf-8" ) as f, open(snake_case , encoding="utf-8" ) as expected_file:
assert f.read() == expected_file.read()
@pytest.mark.parametrize("protocol" , ["zip", "gzip"] )
def SCREAMING_SNAKE_CASE__ ( snake_case : Tuple , snake_case : Dict , snake_case : Tuple )-> Optional[Any]:
'''simple docstring'''
UpperCAmelCase__ : List[str] = {"zip": zip_jsonl_path, "gzip": jsonl_gz_path}
UpperCAmelCase__ : int = compressed_file_paths[protocol]
UpperCAmelCase__ : Any = "dataset.jsonl"
UpperCAmelCase__ : Any = f'{protocol}://{member_file_path}::{compressed_file_path}'
UpperCAmelCase__ , *UpperCAmelCase__ : Optional[int] = fsspec.get_fs_token_paths(snake_case )
assert fs.isfile(snake_case )
assert not fs.isfile("non_existing_" + member_file_path )
@pytest.mark.integration
def SCREAMING_SNAKE_CASE__ ( snake_case : List[str] , snake_case : Dict , snake_case : Dict , snake_case : Dict )-> str:
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = hf_api.dataset_info(snake_case , token=snake_case )
UpperCAmelCase__ : str = HfFileSystem(repo_info=snake_case , token=snake_case )
assert sorted(hffs.glob("*" ) ) == [".gitattributes", "data"]
assert hffs.isdir("data" )
assert hffs.isfile(".gitattributes" ) and hffs.isfile("data/text_data.txt" )
with open(snake_case ) as f:
assert hffs.open("data/text_data.txt" , "r" ).read() == f.read()
def SCREAMING_SNAKE_CASE__ ( )-> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase__ : Tuple = "bz2"
# Import module
import datasets.filesystems
# Overwrite protocol and reload
register_implementation(snake_case , snake_case , clobber=snake_case )
with pytest.warns(snake_case ) as warning_info:
importlib.reload(datasets.filesystems )
assert len(snake_case ) == 1
assert (
str(warning_info[0].message )
== f'A filesystem protocol was already set for {protocol} and will be overwritten.'
)
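# The `protocol://member::outer` URLs exercised above use fsspec URL chaining: the
# segment after `::` is opened first, then the member path is resolved inside it.
# A self-contained sketch that builds a small archive so the chained URL resolves:
import json
import zipfile

import fsspec

with zipfile.ZipFile("archive.zip", "w") as zf:
    zf.writestr("dataset.jsonl", json.dumps({"text": "hello"}) + "\n")
fs, *_ = fsspec.get_fs_token_paths("zip://dataset.jsonl::archive.zip")
assert fs.isfile("dataset.jsonl")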
| 298
| 1
|
"""simple docstring"""
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
_lowerCAmelCase : List[Any] = """
Hugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.
In March 2021, Hugging Face raised $40 million in a Series B funding round.[3]
On April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]
"""
class lowerCAmelCase__ ( unittest.TestCase , __magic_name__ ):
def __a ( self : Any ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = load_tool("text-question-answering" )
self.tool.setup()
UpperCAmelCase__ : Optional[Any] = load_tool("text-question-answering" , remote=snake_case__ )
def __a ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = self.tool(snake_case__ , "What did Hugging Face do in April 2021?" )
self.assertEqual(snake_case__ , "launched the BigScience Research Workshop" )
def __a ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ : str = self.remote_tool(snake_case__ , "What did Hugging Face do in April 2021?" )
self.assertEqual(snake_case__ , "launched the BigScience Research Workshop" )
def __a ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase__ : Dict = self.tool(text=snake_case__ , question="What did Hugging Face do in April 2021?" )
self.assertEqual(snake_case__ , "launched the BigScience Research Workshop" )
def __a ( self : Dict ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = self.remote_tool(text=snake_case__ , question="What did Hugging Face do in April 2021?" )
self.assertEqual(snake_case__ , "launched the BigScience Research Workshop" )
| 298
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase : List[Any] = logging.get_logger(__name__)
_lowerCAmelCase : List[Any] = {
# See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}
class lowerCAmelCase__ ( __magic_name__ ):
SCREAMING_SNAKE_CASE_ ='''megatron-bert'''
def __init__( self : Optional[Any] , snake_case__ : Dict=2_9_0_5_6 , snake_case__ : Optional[int]=1_0_2_4 , snake_case__ : int=2_4 , snake_case__ : str=1_6 , snake_case__ : Optional[Any]=4_0_9_6 , snake_case__ : List[str]="gelu" , snake_case__ : List[str]=0.1 , snake_case__ : Optional[int]=0.1 , snake_case__ : Tuple=5_1_2 , snake_case__ : str=2 , snake_case__ : List[Any]=0.02 , snake_case__ : Any=1e-12 , snake_case__ : Any=0 , snake_case__ : str="absolute" , snake_case__ : Optional[Any]=True , **snake_case__ : int , ):
'''simple docstring'''
super().__init__(pad_token_id=snake_case__ , **snake_case__ )
UpperCAmelCase__ : str = vocab_size
UpperCAmelCase__ : str = hidden_size
UpperCAmelCase__ : List[str] = num_hidden_layers
UpperCAmelCase__ : Optional[int] = num_attention_heads
UpperCAmelCase__ : int = hidden_act
UpperCAmelCase__ : Tuple = intermediate_size
UpperCAmelCase__ : Tuple = hidden_dropout_prob
UpperCAmelCase__ : List[Any] = attention_probs_dropout_prob
UpperCAmelCase__ : Any = max_position_embeddings
UpperCAmelCase__ : Dict = type_vocab_size
UpperCAmelCase__ : Optional[int] = initializer_range
UpperCAmelCase__ : int = layer_norm_eps
UpperCAmelCase__ : Optional[Any] = position_embedding_type
UpperCAmelCase__ : Any = use_cache
| 298
| 1
|
"""simple docstring"""
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Image
from .base import TaskTemplate
@dataclass(frozen=__magic_name__ )
class lowerCAmelCase__ ( __magic_name__ ):
SCREAMING_SNAKE_CASE_ =field(default='''image-classification''' , metadata={'''include_in_asdict_even_if_is_default''': True} )
SCREAMING_SNAKE_CASE_ =Features({'''image''': Image()} )
SCREAMING_SNAKE_CASE_ =Features({'''labels''': ClassLabel} )
SCREAMING_SNAKE_CASE_ ="image"
SCREAMING_SNAKE_CASE_ ="labels"
def __a ( self : int , snake_case__ : Optional[int] ):
'''simple docstring'''
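        # Swap the generic ClassLabel placeholder for the dataset's actual label feature and return the updated copy of the template.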
if self.label_column not in features:
raise ValueError(f'Column {self.label_column} is not present in features.' )
if not isinstance(features[self.label_column] , snake_case__ ):
raise ValueError(f'Column {self.label_column} is not a ClassLabel.' )
UpperCAmelCase__ : List[Any] = copy.deepcopy(self )
UpperCAmelCase__ : Optional[Any] = self.label_schema.copy()
UpperCAmelCase__ : List[str] = features[self.label_column]
UpperCAmelCase__ : int = label_schema
return task_template
@property
def __a ( self : Optional[int] ):
'''simple docstring'''
return {
self.image_column: "image",
self.label_column: "labels",
}
| 298
|
"""simple docstring"""
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import datasets
import datasets.config
from .utils import require_beam
class lowerCAmelCase__ ( datasets.BeamBasedBuilder ):
def __a ( self : Dict ):
'''simple docstring'''
return datasets.DatasetInfo(
features=datasets.Features({"content": datasets.Value("string" )} ) , supervised_keys=snake_case__ , )
def __a ( self : int , snake_case__ : str , snake_case__ : List[str] ):
'''simple docstring'''
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"examples": get_test_dummy_examples()} )]
def __a ( self : Any , snake_case__ : str , snake_case__ : str ):
'''simple docstring'''
import apache_beam as beam
return pipeline | "Load Examples" >> beam.Create(snake_case__ )
class lowerCAmelCase__ ( datasets.BeamBasedBuilder ):
def __a ( self : Any ):
'''simple docstring'''
return datasets.DatasetInfo(
features=datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string" )} )} ) , supervised_keys=snake_case__ , )
def __a ( self : Union[str, Any] , snake_case__ : int , snake_case__ : int ):
'''simple docstring'''
return [
datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"examples": get_test_nested_examples()} )
]
def __a ( self : Dict , snake_case__ : List[Any] , snake_case__ : Any ):
'''simple docstring'''
import apache_beam as beam
return pipeline | "Load Examples" >> beam.Create(snake_case__ )
def SCREAMING_SNAKE_CASE__ ( )-> Dict:
'''simple docstring'''
return [(i, {"content": content}) for i, content in enumerate(["foo", "bar", "foobar"] )]
def SCREAMING_SNAKE_CASE__ ( )-> List[Any]:
'''simple docstring'''
return [(i, {"a": {"b": [content]}}) for i, content in enumerate(["foo", "bar", "foobar"] )]
class lowerCAmelCase__ ( __magic_name__ ):
@require_beam
def __a ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : Any = len(get_test_dummy_examples() )
with tempfile.TemporaryDirectory() as tmp_cache_dir:
UpperCAmelCase__ : List[Any] = DummyBeamDataset(cache_dir=snake_case__ , beam_runner="DirectRunner" )
builder.download_and_prepare()
self.assertTrue(
os.path.exists(
os.path.join(snake_case__ , builder.name , "default" , "0.0.0" , f'{builder.name}-train.arrow' ) ) )
self.assertDictEqual(builder.info.features , datasets.Features({"content": datasets.Value("string" )} ) )
UpperCAmelCase__ : Tuple = builder.as_dataset()
self.assertEqual(dset["train"].num_rows , snake_case__ )
self.assertEqual(dset["train"].info.splits["train"].num_examples , snake_case__ )
self.assertDictEqual(dset["train"][0] , get_test_dummy_examples()[0][1] )
self.assertDictEqual(
dset["train"][expected_num_examples - 1] , get_test_dummy_examples()[expected_num_examples - 1][1] )
self.assertTrue(
os.path.exists(os.path.join(snake_case__ , builder.name , "default" , "0.0.0" , "dataset_info.json" ) ) )
del dset
@require_beam
def __a ( self : Dict ):
'''simple docstring'''
import apache_beam as beam
UpperCAmelCase__ : Dict = beam.io.parquetio.WriteToParquet
UpperCAmelCase__ : List[str] = len(get_test_dummy_examples() )
with tempfile.TemporaryDirectory() as tmp_cache_dir:
UpperCAmelCase__ : Union[str, Any] = DummyBeamDataset(cache_dir=snake_case__ , beam_runner="DirectRunner" )
with patch("apache_beam.io.parquetio.WriteToParquet" ) as write_parquet_mock:
UpperCAmelCase__ : List[Any] = partial(snake_case__ , num_shards=2 )
builder.download_and_prepare()
self.assertTrue(
os.path.exists(
os.path.join(
snake_case__ , builder.name , "default" , "0.0.0" , f'{builder.name}-train-00000-of-00002.arrow' ) ) )
self.assertTrue(
os.path.exists(
os.path.join(
                            snake_case__ , builder.name , "default" , "0.0.0" , f'{builder.name}-train-00001-of-00002.arrow' ) ) )
self.assertDictEqual(builder.info.features , datasets.Features({"content": datasets.Value("string" )} ) )
UpperCAmelCase__ : Dict = builder.as_dataset()
self.assertEqual(dset["train"].num_rows , snake_case__ )
self.assertEqual(dset["train"].info.splits["train"].num_examples , snake_case__ )
# Order is not preserved when sharding, so we just check that all the elements are there
self.assertListEqual(sorted(dset["train"]["content"] ) , sorted(["foo", "bar", "foobar"] ) )
self.assertTrue(
os.path.exists(os.path.join(snake_case__ , builder.name , "default" , "0.0.0" , "dataset_info.json" ) ) )
del dset
@require_beam
def __a ( self : str ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_cache_dir:
UpperCAmelCase__ : Optional[Any] = DummyBeamDataset(cache_dir=snake_case__ )
self.assertRaises(datasets.builder.MissingBeamOptions , builder.download_and_prepare )
@require_beam
def __a ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = len(get_test_nested_examples() )
with tempfile.TemporaryDirectory() as tmp_cache_dir:
UpperCAmelCase__ : List[Any] = NestedBeamDataset(cache_dir=snake_case__ , beam_runner="DirectRunner" )
builder.download_and_prepare()
self.assertTrue(
os.path.exists(
os.path.join(snake_case__ , builder.name , "default" , "0.0.0" , f'{builder.name}-train.arrow' ) ) )
self.assertDictEqual(
builder.info.features , datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string" )} )} ) )
UpperCAmelCase__ : Tuple = builder.as_dataset()
self.assertEqual(dset["train"].num_rows , snake_case__ )
self.assertEqual(dset["train"].info.splits["train"].num_examples , snake_case__ )
self.assertDictEqual(dset["train"][0] , get_test_nested_examples()[0][1] )
self.assertDictEqual(
dset["train"][expected_num_examples - 1] , get_test_nested_examples()[expected_num_examples - 1][1] )
self.assertTrue(
os.path.exists(os.path.join(snake_case__ , builder.name , "default" , "0.0.0" , "dataset_info.json" ) ) )
del dset
| 298
| 1
|
"""simple docstring"""
import argparse
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt
if __name__ == "__main__":
_lowerCAmelCase : Dict = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, required=True, help="""Path to the checkpoint to convert."""
)
parser.add_argument(
"""--original_config_file""",
type=str,
required=True,
help="""The YAML config file corresponding to the original architecture.""",
)
parser.add_argument(
"""--num_in_channels""",
default=None,
type=int,
help="""The number of input channels. If `None` number of input channels will be automatically inferred.""",
)
parser.add_argument(
"""--image_size""",
default=512,
type=int,
help=(
"""The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2"""
""" Base. Use 768 for Stable Diffusion v2."""
),
)
parser.add_argument(
"""--extract_ema""",
action="""store_true""",
help=(
"""Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights"""
""" or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield"""
""" higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning."""
),
)
parser.add_argument(
"""--upcast_attention""",
action="""store_true""",
help=(
"""Whether the attention computation should always be upcasted. This is necessary when running stable"""
""" diffusion 2.1."""
),
)
parser.add_argument(
"""--from_safetensors""",
action="""store_true""",
help="""If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.""",
)
parser.add_argument(
"""--to_safetensors""",
action="""store_true""",
help="""Whether to store pipeline in safetensors format or not.""",
)
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
parser.add_argument("""--device""", type=str, help="""Device to use (e.g. cpu, cuda:0, cuda:1, etc.)""")
def SCREAMING_SNAKE_CASE__ ( snake_case : int )-> Tuple:
'''simple docstring'''
if string == "True":
return True
elif string == "False":
return False
else:
raise ValueError(f'could not parse string as bool {string}' )
parser.add_argument(
"""--use_linear_projection""", help="""Override for use linear projection""", required=False, type=parse_bool
)
parser.add_argument("""--cross_attention_dim""", help="""Override for cross attention_dim""", required=False, type=int)
_lowerCAmelCase : Optional[Any] = parser.parse_args()
_lowerCAmelCase : Optional[Any] = download_controlnet_from_original_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
extract_ema=args.extract_ema,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
use_linear_projection=args.use_linear_projection,
cross_attention_dim=args.cross_attention_dim,
)
controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 298
|
"""simple docstring"""
import json
import os
import unittest
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class lowerCAmelCase__ ( __magic_name__ , unittest.TestCase ):
SCREAMING_SNAKE_CASE_ =XLMTokenizer
SCREAMING_SNAKE_CASE_ =False
def __a ( self : Dict ):
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
UpperCAmelCase__ : Optional[int] = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"w</w>",
"r</w>",
"t</w>",
"lo",
"low",
"er</w>",
"low</w>",
"lowest</w>",
"newer</w>",
"wider</w>",
"<unk>",
]
UpperCAmelCase__ : Any = dict(zip(snake_case__ , range(len(snake_case__ ) ) ) )
UpperCAmelCase__ : Tuple = ["l o 123", "lo w 1456", "e r</w> 1789", ""]
UpperCAmelCase__ : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
UpperCAmelCase__ : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" ) as fp:
fp.write(json.dumps(snake_case__ ) )
with open(self.merges_file , "w" ) as fp:
fp.write("\n".join(snake_case__ ) )
def __a ( self : Union[str, Any] , snake_case__ : List[Any] ):
'''simple docstring'''
UpperCAmelCase__ : str = "lower newer"
UpperCAmelCase__ : Optional[Any] = "lower newer"
return input_text, output_text
def __a ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = XLMTokenizer(self.vocab_file , self.merges_file )
UpperCAmelCase__ : List[Any] = "lower"
UpperCAmelCase__ : Any = ["low", "er</w>"]
UpperCAmelCase__ : Any = tokenizer.tokenize(snake_case__ )
self.assertListEqual(snake_case__ , snake_case__ )
UpperCAmelCase__ : Optional[Any] = tokens + ["<unk>"]
UpperCAmelCase__ : List[Any] = [1_4, 1_5, 2_0]
self.assertListEqual(tokenizer.convert_tokens_to_ids(snake_case__ ) , snake_case__ )
@slow
def __a ( self : Any ):
'''simple docstring'''
UpperCAmelCase__ : Any = XLMTokenizer.from_pretrained("xlm-mlm-en-2048" )
UpperCAmelCase__ : str = tokenizer.encode("sequence builders" , add_special_tokens=snake_case__ )
UpperCAmelCase__ : Dict = tokenizer.encode("multi-sequence build" , add_special_tokens=snake_case__ )
UpperCAmelCase__ : Any = tokenizer.build_inputs_with_special_tokens(snake_case__ )
UpperCAmelCase__ : Optional[Any] = tokenizer.build_inputs_with_special_tokens(snake_case__ , snake_case__ )
assert encoded_sentence == [0] + text + [1]
assert encoded_pair == [0] + text + [1] + text_a + [1]
| 298
| 1
|
"""simple docstring"""
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEmbeddings,
BertLayer,
BertPooler,
BertPreTrainedModel,
)
def SCREAMING_SNAKE_CASE__ ( snake_case : Tuple )-> Optional[int]:
'''simple docstring'''
    exp_x = torch.exp(snake_case )
    A = torch.sum(exp_x , dim=1 )  # sum of exp(x_i)
    B = torch.sum(snake_case * exp_x , dim=1 )  # sum of x_i * exp(x_i)
    # entropy of softmax(x): log(A) - B / A
    return torch.log(A ) - B / A
class lowerCAmelCase__ ( nn.Module ):
def __init__( self : Optional[Any] , snake_case__ : Union[str, Any] ):
'''simple docstring'''
super().__init__()
UpperCAmelCase__ : int = config.output_attentions
UpperCAmelCase__ : int = config.output_hidden_states
UpperCAmelCase__ : str = nn.ModuleList([BertLayer(snake_case__ ) for _ in range(config.num_hidden_layers )] )
UpperCAmelCase__ : int = nn.ModuleList([BertHighway(snake_case__ ) for _ in range(config.num_hidden_layers )] )
UpperCAmelCase__ : List[str] = [-1 for _ in range(config.num_hidden_layers )]
def __a ( self : int , snake_case__ : List[Any] ):
'''simple docstring'''
if (type(snake_case__ ) is float) or (type(snake_case__ ) is int):
for i in range(len(self.early_exit_entropy ) ):
UpperCAmelCase__ : Dict = x
else:
UpperCAmelCase__ : Optional[Any] = x
def __a ( self : List[Any] , snake_case__ : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = pooler.state_dict()
for highway in self.highway:
for name, param in highway.pooler.state_dict().items():
param.copy_(loaded_model[name] )
def __a ( self : Optional[Any] , snake_case__ : List[Any] , snake_case__ : List[Any]=None , snake_case__ : List[Any]=None , snake_case__ : List[str]=None , snake_case__ : int=None , ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = ()
UpperCAmelCase__ : Optional[int] = ()
UpperCAmelCase__ : Dict = ()
for i, layer_module in enumerate(self.layer ):
if self.output_hidden_states:
UpperCAmelCase__ : List[str] = all_hidden_states + (hidden_states,)
UpperCAmelCase__ : int = layer_module(
snake_case__ , snake_case__ , head_mask[i] , snake_case__ , snake_case__ )
UpperCAmelCase__ : Any = layer_outputs[0]
if self.output_attentions:
UpperCAmelCase__ : str = all_attentions + (layer_outputs[1],)
UpperCAmelCase__ : int = (hidden_states,)
if self.output_hidden_states:
UpperCAmelCase__ : Union[str, Any] = current_outputs + (all_hidden_states,)
if self.output_attentions:
UpperCAmelCase__ : int = current_outputs + (all_attentions,)
UpperCAmelCase__ : Optional[int] = self.highway[i](snake_case__ )
# logits, pooled_output
if not self.training:
UpperCAmelCase__ : List[Any] = highway_exit[0]
UpperCAmelCase__ : List[str] = entropy(snake_case__ )
UpperCAmelCase__ : Any = highway_exit + (highway_entropy,) # logits, hidden_states(?), entropy
UpperCAmelCase__ : List[str] = all_highway_exits + (highway_exit,)
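                # Early exit: once this layer's prediction entropy drops below its threshold, abort via an exception that carries the partial outputs.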
if highway_entropy < self.early_exit_entropy[i]:
UpperCAmelCase__ : Tuple = (highway_logits,) + current_outputs[1:] + (all_highway_exits,)
raise HighwayException(snake_case__ , i + 1 )
else:
UpperCAmelCase__ : Any = all_highway_exits + (highway_exit,)
# Add last layer
if self.output_hidden_states:
UpperCAmelCase__ : List[str] = all_hidden_states + (hidden_states,)
UpperCAmelCase__ : Optional[int] = (hidden_states,)
if self.output_hidden_states:
UpperCAmelCase__ : List[str] = outputs + (all_hidden_states,)
if self.output_attentions:
UpperCAmelCase__ : List[Any] = outputs + (all_attentions,)
UpperCAmelCase__ : Tuple = outputs + (all_highway_exits,)
return outputs # last-layer hidden state, (all hidden states), (all attentions), all highway exits
@add_start_docstrings(
'''The Bert Model transformer with early exiting (DeeBERT). ''' , __magic_name__ , )
class lowerCAmelCase__ ( __magic_name__ ):
def __init__( self : List[Any] , snake_case__ : Any ):
'''simple docstring'''
super().__init__(snake_case__ )
UpperCAmelCase__ : List[str] = config
UpperCAmelCase__ : Optional[Any] = BertEmbeddings(snake_case__ )
UpperCAmelCase__ : str = DeeBertEncoder(snake_case__ )
UpperCAmelCase__ : Tuple = BertPooler(snake_case__ )
self.init_weights()
def __a ( self : Tuple ):
'''simple docstring'''
self.encoder.init_highway_pooler(self.pooler )
def __a ( self : Optional[int] ):
'''simple docstring'''
return self.embeddings.word_embeddings
def __a ( self : Tuple , snake_case__ : Tuple ):
'''simple docstring'''
UpperCAmelCase__ : int = value
def __a ( self : Optional[int] , snake_case__ : List[Any] ):
'''simple docstring'''
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(snake_case__ )
@add_start_docstrings_to_model_forward(snake_case__ )
def __a ( self : Dict , snake_case__ : Optional[Any]=None , snake_case__ : Any=None , snake_case__ : Any=None , snake_case__ : int=None , snake_case__ : Tuple=None , snake_case__ : Union[str, Any]=None , snake_case__ : List[str]=None , snake_case__ : Dict=None , ):
'''simple docstring'''
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time" )
elif input_ids is not None:
UpperCAmelCase__ : Tuple = input_ids.size()
elif inputs_embeds is not None:
UpperCAmelCase__ : Optional[int] = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds" )
UpperCAmelCase__ : Any = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
UpperCAmelCase__ : Tuple = torch.ones(snake_case__ , device=snake_case__ )
if encoder_attention_mask is None:
UpperCAmelCase__ : Union[str, Any] = torch.ones(snake_case__ , device=snake_case__ )
if token_type_ids is None:
UpperCAmelCase__ : Optional[int] = torch.zeros(snake_case__ , dtype=torch.long , device=snake_case__ )
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
UpperCAmelCase__ : torch.Tensor = self.get_extended_attention_mask(snake_case__ , snake_case__ , snake_case__ )
        # If a 2D or 3D attention mask is provided for the cross-attention
        # we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
if encoder_attention_mask.dim() == 3:
UpperCAmelCase__ : List[str] = encoder_attention_mask[:, None, :, :]
if encoder_attention_mask.dim() == 2:
UpperCAmelCase__ : Dict = encoder_attention_mask[:, None, None, :]
UpperCAmelCase__ : Union[str, Any] = encoder_extended_attention_mask.to(
dtype=next(self.parameters() ).dtype ) # fp16 compatibility
UpperCAmelCase__ : int = (1.0 - encoder_extended_attention_mask) * -1_0000.0
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
UpperCAmelCase__ : Optional[int] = self.get_head_mask(snake_case__ , self.config.num_hidden_layers )
UpperCAmelCase__ : str = self.embeddings(
input_ids=snake_case__ , position_ids=snake_case__ , token_type_ids=snake_case__ , inputs_embeds=snake_case__ )
UpperCAmelCase__ : Any = self.encoder(
snake_case__ , attention_mask=snake_case__ , head_mask=snake_case__ , encoder_hidden_states=snake_case__ , encoder_attention_mask=snake_case__ , )
UpperCAmelCase__ : Any = encoder_outputs[0]
UpperCAmelCase__ : Dict = self.pooler(snake_case__ )
UpperCAmelCase__ : Union[str, Any] = (
sequence_output,
pooled_output,
) + encoder_outputs[
1:
] # add hidden_states and attentions if they are here
return outputs # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
class lowerCAmelCase__ ( __magic_name__ ):
def __init__( self : List[str] , snake_case__ : Dict , snake_case__ : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ : str = message
UpperCAmelCase__ : Optional[int] = exit_layer # start from 1!
class lowerCAmelCase__ ( nn.Module ):
def __init__( self : str , snake_case__ : Tuple ):
'''simple docstring'''
super().__init__()
UpperCAmelCase__ : List[Any] = BertPooler(snake_case__ )
UpperCAmelCase__ : Dict = nn.Dropout(config.hidden_dropout_prob )
UpperCAmelCase__ : int = nn.Linear(config.hidden_size , config.num_labels )
def __a ( self : int , snake_case__ : Any ):
'''simple docstring'''
# Pooler
UpperCAmelCase__ : List[Any] = encoder_outputs[0]
UpperCAmelCase__ : List[str] = self.pooler(snake_case__ )
# "return" pooler_output
# BertModel
UpperCAmelCase__ : int = (pooler_input, pooler_output) + encoder_outputs[1:]
# "return" bmodel_output
# Dropout and classification
UpperCAmelCase__ : Tuple = bmodel_output[1]
UpperCAmelCase__ : Any = self.dropout(snake_case__ )
UpperCAmelCase__ : Dict = self.classifier(snake_case__ )
return logits, pooled_output
@add_start_docstrings(
'''Bert Model (with early exiting - DeeBERT) with a classifier on top,
also takes care of multi-layer training. ''' , __magic_name__ , )
class lowerCAmelCase__ ( __magic_name__ ):
def __init__( self : Dict , snake_case__ : int ):
'''simple docstring'''
super().__init__(snake_case__ )
UpperCAmelCase__ : List[Any] = config.num_labels
UpperCAmelCase__ : Optional[Any] = config.num_hidden_layers
UpperCAmelCase__ : Optional[Any] = DeeBertModel(snake_case__ )
UpperCAmelCase__ : List[str] = nn.Dropout(config.hidden_dropout_prob )
UpperCAmelCase__ : str = nn.Linear(config.hidden_size , self.config.num_labels )
self.init_weights()
@add_start_docstrings_to_model_forward(snake_case__ )
def __a ( self : List[str] , snake_case__ : Optional[Any]=None , snake_case__ : Any=None , snake_case__ : Dict=None , snake_case__ : Tuple=None , snake_case__ : Union[str, Any]=None , snake_case__ : Union[str, Any]=None , snake_case__ : int=None , snake_case__ : Dict=-1 , snake_case__ : List[Any]=False , ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = self.num_layers
try:
UpperCAmelCase__ : Any = self.bert(
snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , position_ids=snake_case__ , head_mask=snake_case__ , inputs_embeds=snake_case__ , )
# sequence_output, pooled_output, (hidden_states), (attentions), highway exits
UpperCAmelCase__ : Dict = outputs[1]
UpperCAmelCase__ : Optional[int] = self.dropout(snake_case__ )
UpperCAmelCase__ : Optional[Any] = self.classifier(snake_case__ )
UpperCAmelCase__ : Any = (logits,) + outputs[2:] # add hidden states and attention if they are here
except HighwayException as e:
UpperCAmelCase__ : str = e.message
UpperCAmelCase__ : List[str] = e.exit_layer
UpperCAmelCase__ : int = outputs[0]
if not self.training:
UpperCAmelCase__ : Dict = entropy(snake_case__ )
UpperCAmelCase__ : List[str] = []
UpperCAmelCase__ : Any = []
if labels is not None:
if self.num_labels == 1:
# We are doing regression
UpperCAmelCase__ : Dict = MSELoss()
UpperCAmelCase__ : Tuple = loss_fct(logits.view(-1 ) , labels.view(-1 ) )
else:
UpperCAmelCase__ : List[Any] = CrossEntropyLoss()
UpperCAmelCase__ : Any = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
# work with highway exits
UpperCAmelCase__ : int = []
for highway_exit in outputs[-1]:
UpperCAmelCase__ : List[str] = highway_exit[0]
if not self.training:
highway_logits_all.append(snake_case__ )
highway_entropy.append(highway_exit[2] )
if self.num_labels == 1:
# We are doing regression
UpperCAmelCase__ : Optional[int] = MSELoss()
UpperCAmelCase__ : List[str] = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) )
else:
UpperCAmelCase__ : Optional[Any] = CrossEntropyLoss()
UpperCAmelCase__ : str = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
highway_losses.append(snake_case__ )
if train_highway:
UpperCAmelCase__ : Optional[int] = (sum(highway_losses[:-1] ),) + outputs
# exclude the final highway, of course
else:
UpperCAmelCase__ : Optional[int] = (loss,) + outputs
if not self.training:
UpperCAmelCase__ : Tuple = outputs + ((original_entropy, highway_entropy), exit_layer)
if output_layer >= 0:
UpperCAmelCase__ : Optional[Any] = (
(outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
) # use the highway of the last layer
return outputs # (loss), logits, (hidden_states), (attentions), (highway_exits)
| 298
|
"""simple docstring"""
import sys
from typing import Tuple
import numpy as np
import torch
from PIL import Image
from torch import nn
from transformers.image_utils import PILImageResampling
from utils import img_tensorize
class lowerCAmelCase__ :
def __init__( self : Optional[int] , snake_case__ : List[Any] , snake_case__ : str=sys.maxsize ):
'''simple docstring'''
UpperCAmelCase__ : Any = "bilinear"
UpperCAmelCase__ : Any = max_size
UpperCAmelCase__ : Any = short_edge_length
def __call__( self : Dict , snake_case__ : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = []
for img in imgs:
UpperCAmelCase__ , UpperCAmelCase__ : int = img.shape[:2]
# later: provide list and randomly choose index for resize
UpperCAmelCase__ : Dict = np.random.randint(self.short_edge_length[0] , self.short_edge_length[1] + 1 )
if size == 0:
return img
UpperCAmelCase__ : Dict = size * 1.0 / min(snake_case__ , snake_case__ )
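            # Scale the shorter edge to the sampled size while keeping the aspect ratio; the longer edge is capped at max_size below.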
if h < w:
UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = size, scale * w
else:
UpperCAmelCase__ , UpperCAmelCase__ : int = scale * h, size
if max(snake_case__ , snake_case__ ) > self.max_size:
UpperCAmelCase__ : Union[str, Any] = self.max_size * 1.0 / max(snake_case__ , snake_case__ )
UpperCAmelCase__ : List[str] = newh * scale
UpperCAmelCase__ : int = neww * scale
UpperCAmelCase__ : List[Any] = int(neww + 0.5 )
UpperCAmelCase__ : Optional[Any] = int(newh + 0.5 )
            if img.dtype == np.uint8:
UpperCAmelCase__ : Any = Image.fromarray(snake_case__ )
UpperCAmelCase__ : Union[str, Any] = pil_image.resize((neww, newh) , PILImageResampling.BILINEAR )
UpperCAmelCase__ : Optional[int] = np.asarray(snake_case__ )
else:
                UpperCAmelCase__ : Any = img.permute(2 , 0 , 1 ).unsqueeze(0 )  # (h, w, c) -> (1, c, h, w)
UpperCAmelCase__ : Tuple = nn.functional.interpolate(
snake_case__ , (newh, neww) , mode=self.interp_method , align_corners=snake_case__ ).squeeze(0 )
img_augs.append(snake_case__ )
return img_augs
class lowerCAmelCase__ :
def __init__( self : Optional[int] , snake_case__ : Dict ):
'''simple docstring'''
UpperCAmelCase__ : Dict = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST] , cfg.INPUT.MAX_SIZE_TEST )
UpperCAmelCase__ : Any = cfg.INPUT.FORMAT
UpperCAmelCase__ : Optional[Any] = cfg.SIZE_DIVISIBILITY
UpperCAmelCase__ : str = cfg.PAD_VALUE
UpperCAmelCase__ : List[Any] = cfg.INPUT.MAX_SIZE_TEST
UpperCAmelCase__ : Dict = cfg.MODEL.DEVICE
UpperCAmelCase__ : Optional[int] = torch.tensor(cfg.MODEL.PIXEL_STD ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 )
UpperCAmelCase__ : str = torch.tensor(cfg.MODEL.PIXEL_MEAN ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 )
        UpperCAmelCase__ : List[str] = lambda x : (x - self.pixel_mean) / self.pixel_std
def __a ( self : Optional[int] , snake_case__ : Dict ):
'''simple docstring'''
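        # Pad every image on the bottom/right up to the batch-wide max height/width, then stack into one tensor and return the original sizes.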
        UpperCAmelCase__ : Union[str, Any] = tuple(max(s ) for s in zip(*[img.shape for img in images] ) )
UpperCAmelCase__ : Tuple = [im.shape[-2:] for im in images]
UpperCAmelCase__ : int = [
nn.functional.pad(
snake_case__ , [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]] , value=self.pad_value , )
for size, im in zip(snake_case__ , snake_case__ )
]
return torch.stack(snake_case__ ), torch.tensor(snake_case__ )
def __call__( self : str , snake_case__ : int , snake_case__ : int=False ):
'''simple docstring'''
with torch.no_grad():
if not isinstance(snake_case__ , snake_case__ ):
UpperCAmelCase__ : Dict = [images]
if single_image:
assert len(snake_case__ ) == 1
for i in range(len(snake_case__ ) ):
if isinstance(images[i] , torch.Tensor ):
images.insert(snake_case__ , images.pop(snake_case__ ).to(self.device ).float() )
elif not isinstance(images[i] , torch.Tensor ):
images.insert(
snake_case__ , torch.as_tensor(img_tensorize(images.pop(snake_case__ ) , input_format=self.input_format ) )
.to(self.device )
.float() , )
# resize smallest edge
UpperCAmelCase__ : Optional[Any] = torch.tensor([im.shape[:2] for im in images] )
UpperCAmelCase__ : Tuple = self.aug(snake_case__ )
# transpose images and convert to torch tensors
# images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images]
# now normalize before pad to avoid useless arithmetic
UpperCAmelCase__ : Optional[int] = [self.normalizer(snake_case__ ) for x in images]
# now pad them to do the following operations
UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = self.pad(snake_case__ )
# Normalize
if self.size_divisibility > 0:
raise NotImplementedError()
# pad
UpperCAmelCase__ : Tuple = torch.true_divide(snake_case__ , snake_case__ )
if single_image:
return images[0], sizes[0], scales_yx[0]
else:
return images, sizes, scales_yx
def SCREAMING_SNAKE_CASE__ ( snake_case : Optional[Any] , snake_case : str )-> List[Any]:
'''simple docstring'''
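    # Boxes are (x0, y0, x1, y1): even columns are x coordinates (scaled by scale_yx[:, 1]), odd columns are y (scaled by scale_yx[:, 0]).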
boxes[:, 0::2] *= scale_yx[:, 1]
boxes[:, 1::2] *= scale_yx[:, 0]
return boxes
def SCREAMING_SNAKE_CASE__ ( snake_case : List[Any] , snake_case : Tuple[int, int] )-> int:
'''simple docstring'''
assert torch.isfinite(snake_case ).all(), "Box tensor contains infinite or NaN!"
UpperCAmelCase__ , UpperCAmelCase__ : Dict = box_size
tensor[:, 0].clamp_(min=0 , max=snake_case )
tensor[:, 1].clamp_(min=0 , max=snake_case )
tensor[:, 2].clamp_(min=0 , max=snake_case )
tensor[:, 3].clamp_(min=0 , max=snake_case )
| 298
| 1
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
_lowerCAmelCase : List[str] = logging.get_logger(__name__)
_lowerCAmelCase : Union[str, Any] = {"""vocab_file""": """sentencepiece.bpe.model"""}
_lowerCAmelCase : List[Any] = {
"""vocab_file""": {
"""camembert-base""": """https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model""",
}
}
_lowerCAmelCase : str = {
"""camembert-base""": 512,
}
_lowerCAmelCase : Union[str, Any] = """▁"""
class lowerCAmelCase__ ( __magic_name__ ):
SCREAMING_SNAKE_CASE_ =VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE_ =PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE_ =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE_ =['''input_ids''', '''attention_mask''']
def __init__( self : str , snake_case__ : str , snake_case__ : Optional[Any]="<s>" , snake_case__ : int="</s>" , snake_case__ : Optional[Any]="</s>" , snake_case__ : Optional[int]="<s>" , snake_case__ : Union[str, Any]="<unk>" , snake_case__ : int="<pad>" , snake_case__ : List[str]="<mask>" , snake_case__ : int=["<s>NOTUSED", "</s>NOTUSED"] , snake_case__ : Optional[Dict[str, Any]] = None , **snake_case__ : Optional[int] , ):
'''simple docstring'''
# Mask token behave like a normal word, i.e. include the space before it
UpperCAmelCase__ : Optional[Any] = AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else mask_token
UpperCAmelCase__ : Optional[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=snake_case__ , eos_token=snake_case__ , unk_token=snake_case__ , sep_token=snake_case__ , cls_token=snake_case__ , pad_token=snake_case__ , mask_token=snake_case__ , additional_special_tokens=snake_case__ , sp_model_kwargs=self.sp_model_kwargs , **snake_case__ , )
UpperCAmelCase__ : Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(snake_case__ ) )
UpperCAmelCase__ : List[str] = vocab_file
# HACK: These tokens were added by fairseq but don't seem to be actually used when duplicated in the actual
        # sentencepiece vocabulary (this is the case for <s> and </s>)
UpperCAmelCase__ : str = {"<s>NOTUSED": 0, "<pad>": 1, "</s>NOTUSED": 2, "<unk>": 3}
UpperCAmelCase__ : Union[str, Any] = len(self.fairseq_tokens_to_ids )
UpperCAmelCase__ : Optional[int] = len(self.sp_model ) + len(self.fairseq_tokens_to_ids )
UpperCAmelCase__ : Tuple = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __a ( self : Optional[int] , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None ):
'''simple docstring'''
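        # CamemBERT follows the RoBERTa scheme: <s> A </s> for a single sequence and <s> A </s></s> B </s> for a pair.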
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
UpperCAmelCase__ : Dict = [self.cls_token_id]
UpperCAmelCase__ : Union[str, Any] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def __a ( self : Optional[Any] , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None , snake_case__ : bool = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=snake_case__ , token_ids_a=snake_case__ , already_has_special_tokens=snake_case__ )
if token_ids_a is None:
return [1] + ([0] * len(snake_case__ )) + [1]
return [1] + ([0] * len(snake_case__ )) + [1, 1] + ([0] * len(snake_case__ )) + [1]
def __a ( self : str , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = [self.sep_token_id]
UpperCAmelCase__ : int = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def __a ( self : int ):
'''simple docstring'''
return len(self.fairseq_tokens_to_ids ) + len(self.sp_model )
def __a ( self : Any ):
'''simple docstring'''
UpperCAmelCase__ : Dict = {self.convert_ids_to_tokens(snake_case__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __a ( self : Optional[Any] , snake_case__ : str ):
'''simple docstring'''
return self.sp_model.encode(snake_case__ , out_type=snake_case__ )
def __a ( self : Optional[int] , snake_case__ : Optional[int] ):
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
elif self.sp_model.PieceToId(snake_case__ ) == 0:
# Convert sentence piece unk token to fairseq unk token index
return self.unk_token_id
return self.fairseq_offset + self.sp_model.PieceToId(snake_case__ )
def __a ( self : Optional[int] , snake_case__ : Optional[Any] ):
'''simple docstring'''
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def __a ( self : Dict , snake_case__ : str ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = []
UpperCAmelCase__ : Optional[int] = ""
UpperCAmelCase__ : List[str] = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(snake_case__ ) + token
UpperCAmelCase__ : Optional[int] = True
UpperCAmelCase__ : Optional[int] = []
else:
current_sub_tokens.append(snake_case__ )
UpperCAmelCase__ : Union[str, Any] = False
out_string += self.sp_model.decode(snake_case__ )
return out_string.strip()
def __getstate__( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = self.__dict__.copy()
UpperCAmelCase__ : int = None
return state
def __setstate__( self : List[Any] , snake_case__ : str ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
UpperCAmelCase__ : int = {}
UpperCAmelCase__ : Any = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def __a ( self : int , snake_case__ : str , snake_case__ : Optional[str] = None ):
'''simple docstring'''
if not os.path.isdir(snake_case__ ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
UpperCAmelCase__ : Union[str, Any] = os.path.join(
snake_case__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , snake_case__ )
elif not os.path.isfile(self.vocab_file ):
with open(snake_case__ , "wb" ) as fi:
UpperCAmelCase__ : Optional[int] = self.sp_model.serialized_model_proto()
fi.write(snake_case__ )
return (out_vocab_file,)
| 298
|
"""simple docstring"""
import qiskit
def SCREAMING_SNAKE_CASE__ ( snake_case : int , snake_case : int )-> qiskit.result.counts.Counts:
'''simple docstring'''
UpperCAmelCase__ : str = qiskit.Aer.get_backend("aer_simulator" )
UpperCAmelCase__ : Optional[int] = qiskit.QuantumCircuit(4 , 2 )
# encode inputs in qubits 0 and 1
if bita == 1:
qc_ha.x(0 )
if bita == 1:
qc_ha.x(1 )
qc_ha.barrier()
# use cnots to write XOR of the inputs on qubit2
qc_ha.cx(0 , 2 )
qc_ha.cx(1 , 2 )
# use ccx / toffoli gate to write AND of the inputs on qubit3
qc_ha.ccx(0 , 1 , 3 )
qc_ha.barrier()
# extract outputs
qc_ha.measure(2 , 0 ) # extract XOR value
qc_ha.measure(3 , 1 ) # extract AND value
# Execute the circuit on the qasm simulator
UpperCAmelCase__ : Optional[int] = qiskit.execute(snake_case , snake_case , shots=1000 )
# Return the histogram data of the results of the experiment
return job.result().get_counts(snake_case )
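# For input bits (1, 1): XOR (sum) = 0 and AND (carry) = 1, so ideally every shot should land on the "10" count.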
if __name__ == "__main__":
_lowerCAmelCase : Optional[Any] = half_adder(1, 1)
print(F"""Half Adder Output Qubit Counts: {counts}""")
| 298
| 1
|
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.xglm.modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
)
@require_tf
class lowerCAmelCase__ :
SCREAMING_SNAKE_CASE_ =XGLMConfig
SCREAMING_SNAKE_CASE_ ={}
SCREAMING_SNAKE_CASE_ ='''gelu'''
def __init__( self : List[str] , snake_case__ : str , snake_case__ : str=1_4 , snake_case__ : Any=7 , snake_case__ : Any=True , snake_case__ : int=True , snake_case__ : int=True , snake_case__ : int=9_9 , snake_case__ : Dict=3_2 , snake_case__ : Any=2 , snake_case__ : Union[str, Any]=4 , snake_case__ : Optional[int]=3_7 , snake_case__ : Any="gelu" , snake_case__ : Union[str, Any]=0.1 , snake_case__ : int=0.1 , snake_case__ : Optional[Any]=5_1_2 , snake_case__ : Tuple=0.02 , ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = parent
UpperCAmelCase__ : str = batch_size
UpperCAmelCase__ : Optional[Any] = seq_length
UpperCAmelCase__ : List[Any] = is_training
UpperCAmelCase__ : List[Any] = use_input_mask
UpperCAmelCase__ : List[Any] = use_labels
UpperCAmelCase__ : Dict = vocab_size
UpperCAmelCase__ : Any = d_model
UpperCAmelCase__ : List[Any] = num_hidden_layers
UpperCAmelCase__ : Tuple = num_attention_heads
UpperCAmelCase__ : str = ffn_dim
UpperCAmelCase__ : Optional[Any] = activation_function
UpperCAmelCase__ : Union[str, Any] = activation_dropout
UpperCAmelCase__ : int = attention_dropout
UpperCAmelCase__ : Optional[int] = max_position_embeddings
UpperCAmelCase__ : str = initializer_range
UpperCAmelCase__ : int = None
UpperCAmelCase__ : List[Any] = 0
UpperCAmelCase__ : Optional[int] = 2
UpperCAmelCase__ : List[Any] = 1
def __a ( self : Optional[int] ):
'''simple docstring'''
return XGLMConfig.from_pretrained("facebook/xglm-564M" )
def __a ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = tf.clip_by_value(
ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) , clip_value_min=0 , clip_value_max=3 )
UpperCAmelCase__ : Tuple = None
if self.use_input_mask:
UpperCAmelCase__ : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase__ : Optional[int] = self.get_config()
UpperCAmelCase__ : List[str] = floats_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
input_mask,
head_mask,
)
def __a ( self : List[Any] ):
'''simple docstring'''
return XGLMConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , num_layers=self.num_hidden_layers , attention_heads=self.num_attention_heads , ffn_dim=self.ffn_dim , activation_function=self.activation_function , activation_dropout=self.activation_dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , use_cache=snake_case__ , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , return_dict=snake_case__ , )
def __a ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = self.prepare_config_and_inputs()
        UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = config_and_inputs
UpperCAmelCase__ : Union[str, Any] = {
"input_ids": input_ids,
"head_mask": head_mask,
}
return config, inputs_dict
@require_tf
class lowerCAmelCase__ ( __magic_name__ , __magic_name__ , unittest.TestCase ):
SCREAMING_SNAKE_CASE_ =(TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
SCREAMING_SNAKE_CASE_ =(TFXGLMForCausalLM,) if is_tf_available() else ()
SCREAMING_SNAKE_CASE_ =(
{'''feature-extraction''': TFXGLMModel, '''text-generation''': TFXGLMForCausalLM} if is_tf_available() else {}
)
SCREAMING_SNAKE_CASE_ =False
SCREAMING_SNAKE_CASE_ =False
SCREAMING_SNAKE_CASE_ =False
def __a ( self : str ):
'''simple docstring'''
UpperCAmelCase__ : Any = TFXGLMModelTester(self )
UpperCAmelCase__ : Tuple = ConfigTester(self , config_class=snake_case__ , n_embd=3_7 )
def __a ( self : Union[str, Any] ):
'''simple docstring'''
self.config_tester.run_common_tests()
@slow
def __a ( self : Optional[Any] ):
'''simple docstring'''
for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase__ : Optional[int] = TFXGLMModel.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
@unittest.skip(reason="Currently, model embeddings are going to undergo a major refactor." )
def __a ( self : Union[str, Any] ):
'''simple docstring'''
super().test_resize_token_embeddings()
@require_tf
class lowerCAmelCase__ ( unittest.TestCase ):
@slow
def __a ( self : Optional[Any] , snake_case__ : Tuple=True ):
'''simple docstring'''
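        # With num_beams=1 (and sampling presumably disabled), decoding from "The dog" should reproduce the recorded token ids.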
UpperCAmelCase__ : Any = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M" )
        UpperCAmelCase__ : Union[str, Any] = tf.convert_to_tensor([[2, 2_6_8, 9_8_6_5]] , dtype=tf.int32 )  # The dog
# </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
# fmt: off
UpperCAmelCase__ : Optional[int] = [2, 2_6_8, 9_8_6_5, 6_7, 1_1, 1_9_8_8, 5_7_2_5_2, 9_8_6_5, 5, 9_8_4, 6_7, 1_9_8_8, 2_1_3_8_3_8, 1_6_5_8, 5_3, 7_0_4_4_6, 3_3, 6_6_5_7, 2_7_8, 1_5_8_1]
# fmt: on
UpperCAmelCase__ : Tuple = model.generate(snake_case__ , do_sample=snake_case__ , num_beams=1 )
if verify_outputs:
self.assertListEqual(output_ids[0].numpy().tolist() , snake_case__ )
@slow
def __a ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = XGLMTokenizer.from_pretrained("facebook/xglm-564M" )
UpperCAmelCase__ : List[str] = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M" )
tf.random.set_seed(0 )
UpperCAmelCase__ : Union[str, Any] = tokenizer("Today is a nice day and" , return_tensors="tf" )
UpperCAmelCase__ : str = tokenized.input_ids
        # force generation to happen on CPU, to avoid GPU-related quirks (and ensure the same output regardless of the available devices)
with tf.device(":/CPU:0" ):
UpperCAmelCase__ : Optional[int] = model.generate(snake_case__ , do_sample=snake_case__ , seed=[7, 0] )
UpperCAmelCase__ : Any = tokenizer.decode(output_ids[0] , skip_special_tokens=snake_case__ )
UpperCAmelCase__ : Dict = (
"Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due"
)
self.assertEqual(snake_case__ , snake_case__ )
@slow
def __a ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase__ : Dict = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M" )
UpperCAmelCase__ : List[Any] = XGLMTokenizer.from_pretrained("facebook/xglm-564M" )
UpperCAmelCase__ : List[Any] = "left"
# use different length sentences to test batching
UpperCAmelCase__ : Tuple = [
"This is an extremelly long sentence that only exists to test the ability of the model to cope with "
"left-padding, such as in batched generation. The output for the sequence below should be the same "
"regardless of whether left padding is applied or not. When",
"Hello, my dog is a little",
]
UpperCAmelCase__ : Optional[Any] = tokenizer(snake_case__ , return_tensors="tf" , padding=snake_case__ )
UpperCAmelCase__ : Optional[Any] = inputs["input_ids"]
UpperCAmelCase__ : Dict = model.generate(input_ids=snake_case__ , attention_mask=inputs["attention_mask"] , max_new_tokens=1_2 )
UpperCAmelCase__ : Optional[int] = tokenizer(sentences[0] , return_tensors="tf" ).input_ids
UpperCAmelCase__ : Tuple = model.generate(input_ids=snake_case__ , max_new_tokens=1_2 )
UpperCAmelCase__ : Optional[Any] = tokenizer(sentences[1] , return_tensors="tf" ).input_ids
UpperCAmelCase__ : str = model.generate(input_ids=snake_case__ , max_new_tokens=1_2 )
UpperCAmelCase__ : Optional[Any] = tokenizer.batch_decode(snake_case__ , skip_special_tokens=snake_case__ )
UpperCAmelCase__ : str = tokenizer.decode(output_non_padded[0] , skip_special_tokens=snake_case__ )
UpperCAmelCase__ : List[str] = tokenizer.decode(output_padded[0] , skip_special_tokens=snake_case__ )
UpperCAmelCase__ : Dict = [
"This is an extremelly long sentence that only exists to test the ability of the model to cope with "
"left-padding, such as in batched generation. The output for the sequence below should be the same "
"regardless of whether left padding is applied or not. When left padding is applied, the sequence will be "
"a single",
"Hello, my dog is a little bit of a shy one, but he is very friendly",
]
self.assertListEqual(snake_case__ , snake_case__ )
self.assertListEqual(snake_case__ , [non_padded_sentence, padded_sentence] )
| 298
|
"""simple docstring"""
from typing import List
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase : Dict = logging.get_logger(__name__)
_lowerCAmelCase : Union[str, Any] = {
"""snap-research/efficientformer-l1-300""": (
"""https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json"""
),
}
class lowerCAmelCase__ ( __magic_name__ ):
SCREAMING_SNAKE_CASE_ ='''efficientformer'''
def __init__( self : List[Any] , snake_case__ : List[int] = [3, 2, 6, 4] , snake_case__ : List[int] = [4_8, 9_6, 2_2_4, 4_4_8] , snake_case__ : List[bool] = [True, True, True, True] , snake_case__ : int = 4_4_8 , snake_case__ : int = 3_2 , snake_case__ : int = 4 , snake_case__ : int = 7 , snake_case__ : int = 5 , snake_case__ : int = 8 , snake_case__ : int = 4 , snake_case__ : float = 0.0 , snake_case__ : int = 1_6 , snake_case__ : int = 3 , snake_case__ : int = 3 , snake_case__ : int = 3 , snake_case__ : int = 2 , snake_case__ : int = 1 , snake_case__ : float = 0.0 , snake_case__ : int = 1 , snake_case__ : bool = True , snake_case__ : bool = True , snake_case__ : float = 1e-5 , snake_case__ : str = "gelu" , snake_case__ : float = 0.02 , snake_case__ : float = 1e-12 , snake_case__ : int = 2_2_4 , snake_case__ : float = 1e-05 , **snake_case__ : str , ):
'''simple docstring'''
super().__init__(**snake_case__ )
UpperCAmelCase__ : int = hidden_act
UpperCAmelCase__ : Optional[int] = hidden_dropout_prob
UpperCAmelCase__ : List[str] = hidden_sizes
UpperCAmelCase__ : Union[str, Any] = num_hidden_layers
UpperCAmelCase__ : int = num_attention_heads
UpperCAmelCase__ : List[Any] = initializer_range
UpperCAmelCase__ : List[Any] = layer_norm_eps
UpperCAmelCase__ : Optional[int] = patch_size
UpperCAmelCase__ : Tuple = num_channels
UpperCAmelCase__ : Optional[int] = depths
UpperCAmelCase__ : Union[str, Any] = mlp_expansion_ratio
UpperCAmelCase__ : Dict = downsamples
UpperCAmelCase__ : Any = dim
UpperCAmelCase__ : str = key_dim
UpperCAmelCase__ : List[Any] = attention_ratio
UpperCAmelCase__ : Optional[Any] = resolution
UpperCAmelCase__ : Optional[Any] = pool_size
UpperCAmelCase__ : Any = downsample_patch_size
UpperCAmelCase__ : int = downsample_stride
UpperCAmelCase__ : Dict = downsample_pad
UpperCAmelCase__ : List[Any] = drop_path_rate
UpperCAmelCase__ : Optional[Any] = num_metaad_blocks
UpperCAmelCase__ : List[str] = distillation
UpperCAmelCase__ : Dict = use_layer_scale
UpperCAmelCase__ : List[Any] = layer_scale_init_value
UpperCAmelCase__ : Optional[Any] = image_size
UpperCAmelCase__ : Optional[int] = batch_norm_eps
| 298
| 1
|