| code (string, 86-54.5k chars) | code_codestyle (int, 0-371) | style_context (string, 87-49.2k chars) | style_context_codestyle (int, 0-349) | label (int, 0-1) |
|---|---|---|---|---|
from argparse import ArgumentParser, Namespace
from ..utils import logging
from . import BaseTransformersCLICommand
def convert_command_factory(args: Namespace):
    return ConvertCommand(
        args.model_type, args.tf_checkpoint, args.pytorch_dump_output, args.config, args.finetuning_task_name
    )
IMPORT_ERROR_MESSAGE = """
transformers can only be used from the commandline to convert TensorFlow models in PyTorch, In that case, it requires
TensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.
"""
class ConvertCommand(BaseTransformersCLICommand):
    """CLI command that converts original author checkpoints to Transformers PyTorch checkpoints."""
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        train_parser = parser.add_parser(
            "convert",
            help="CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints.",
        )
        train_parser.add_argument("--model_type", type=str, required=True, help="Model's type.")
        train_parser.add_argument(
            "--tf_checkpoint", type=str, required=True, help="TensorFlow checkpoint path or folder."
        )
        train_parser.add_argument(
            "--pytorch_dump_output", type=str, required=True, help="Path to the PyTorch saved model output."
        )
        train_parser.add_argument("--config", type=str, default="", help="Configuration file path or folder.")
        train_parser.add_argument(
            "--finetuning_task_name",
            type=str,
            default=None,
            help="Optional fine-tuning task name if the TF model was a finetuned model.",
        )
        train_parser.set_defaults(func=convert_command_factory)
    def __init__(
        self,
        model_type: str,
        tf_checkpoint: str,
        pytorch_dump_output: str,
        config: str,
        finetuning_task_name: str,
        *args,
    ):
        self._logger = logging.get_logger("transformers-cli/converting")

        self._logger.info(f"Loading model {model_type}")
        self._model_type = model_type
        self._tf_checkpoint = tf_checkpoint
        self._pytorch_dump_output = pytorch_dump_output
        self._config = config
        self._finetuning_task_name = finetuning_task_name
    def run(self):
        if self._model_type == "albert":
            try:
                from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import (
                    convert_tf_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "bert":
            try:
                from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import (
                    convert_tf_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "funnel":
            try:
                from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import (
                    convert_tf_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "t5":
            try:
                from ..models.t5.convert_t5_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "gpt":
            from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import (
                convert_openai_checkpoint_to_pytorch,
            )

            convert_openai_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "transfo_xl":
            try:
                from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import (
                    convert_transfo_xl_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            if "ckpt" in self._tf_checkpoint.lower():
                tf_checkpoint = self._tf_checkpoint
                tf_dataset_file = ""
            else:
                tf_dataset_file = self._tf_checkpoint
                tf_checkpoint = ""
            convert_transfo_xl_checkpoint_to_pytorch(
                tf_checkpoint, self._config, self._pytorch_dump_output, tf_dataset_file
            )
        elif self._model_type == "gpt2":
            try:
                from ..models.gpt2.convert_gpt2_original_tf_checkpoint_to_pytorch import (
                    convert_gpt2_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_gpt2_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "xlnet":
            try:
                from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import (
                    convert_xlnet_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_xlnet_checkpoint_to_pytorch(
                self._tf_checkpoint, self._config, self._pytorch_dump_output, self._finetuning_task_name
            )
        elif self._model_type == "xlm":
            from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import (
                convert_xlm_checkpoint_to_pytorch,
            )

            convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint, self._pytorch_dump_output)
        elif self._model_type == "lxmert":
            from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import (
                convert_lxmert_checkpoint_to_pytorch,
            )

            convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint, self._pytorch_dump_output)
        elif self._model_type == "rembert":
            from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import (
                convert_rembert_tf_checkpoint_to_pytorch,
            )

            convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        else:
            raise ValueError(
                "--model_type should be selected in the list [bert, gpt, gpt2, t5, transfo_xl, xlnet, xlm, lxmert]"
            )
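# Example invocation of the subcommand registered above (paths are placeholders):
#   transformers-cli convert --model_type bert --tf_checkpoint /path/to/ckpt \
#       --config /path/to/config.json --pytorch_dump_output /path/to/out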
| 156 |
from math import pi
def arc_length(angle: int, radius: int) -> float:
    """Return the length of the arc subtended by ``angle`` degrees on a circle of ``radius``."""
    return 2 * pi * radius * (angle / 360)
if __name__ == "__main__":
print(arc_length(90, 10))
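# For angle=90 and radius=10 this prints 2 * pi * 10 * (90 / 360) = 5 * pi ≈ 15.707963267948966.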
| 95 | 0 |
"""simple docstring"""
import argparse
import requests
import torch
from PIL import Image
from torchvision.transforms import Compose, Normalize, Resize, ToTensor
from transformers import Swin2SRConfig, Swin2SRForImageSuperResolution, Swin2SRImageProcessor
def get_config(checkpoint_url):
    config = Swin2SRConfig()

    if "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
        config.upscale = 4
    elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
        config.upscale = 4
        config.image_size = 48
        config.upsampler = "pixelshuffle_aux"
    elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
        config.depths = [6, 6, 6, 6]
        config.embed_dim = 60
        config.num_heads = [6, 6, 6, 6]
        config.upsampler = "pixelshuffledirect"
    elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
        config.upscale = 4
        config.upsampler = "nearest+conv"
    elif "Swin2SR_Jpeg_dynamic" in checkpoint_url:
        config.num_channels = 1
        config.upscale = 1
        config.image_size = 126
        config.window_size = 7
        config.img_range = 255.0
        config.upsampler = ""

    return config
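# Illustrative: get_config("https://.../Swin2SR_Lightweight_X2_64.pth") selects the lightweight
# branch above (embed_dim=60, upsampler="pixelshuffledirect").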
def rename_key(name, config):
    if "patch_embed.proj" in name and "layers" not in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.patch_embeddings.layernorm")
    if "layers" in name:
        name = name.replace("layers", "encoder.stages")
    if "residual_group.blocks" in name:
        name = name.replace("residual_group.blocks", "layers")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "q_bias" in name:
        name = name.replace("q_bias", "query.bias")
    if "k_bias" in name:
        name = name.replace("k_bias", "key.bias")
    if "v_bias" in name:
        name = name.replace("v_bias", "value.bias")
    if "cpb_mlp" in name:
        name = name.replace("cpb_mlp", "continuous_position_bias_mlp")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "patch_embed.projection")
    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"
    if "conv_first" in name:
        name = name.replace("conv_first", "first_convolution")
    if (
        "upsample" in name
        or "conv_before_upsample" in name
        or "conv_bicubic" in name
        or "conv_up" in name
        or "conv_hr" in name
        or "conv_last" in name
        or "aux" in name
    ):
        # heads
        if "conv_last" in name:
            name = name.replace("conv_last", "final_convolution")
        if config.upsampler in ["pixelshuffle", "pixelshuffle_aux", "nearest+conv"]:
            if "conv_before_upsample.0" in name:
                name = name.replace("conv_before_upsample.0", "conv_before_upsample")
            if "upsample.0" in name:
                name = name.replace("upsample.0", "upsample.convolution_0")
            if "upsample.2" in name:
                name = name.replace("upsample.2", "upsample.convolution_1")
            name = "upsample." + name
        elif config.upsampler == "pixelshuffledirect":
            name = name.replace("upsample.0.weight", "upsample.conv.weight")
            name = name.replace("upsample.0.bias", "upsample.conv.bias")
        else:
            pass
    else:
        name = "swin2sr." + name

    return name
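# Illustrative rename: rename_key("conv_first.weight", config) -> "swin2sr.first_convolution.weight".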
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            key_split = key.split(".")
            stage_num = int(key_split[1])
            block_num = int(key_split[4])
            dim = config.embed_dim

            if "weight" in key:
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.value.bias"
                ] = val[-dim:]
        else:
            orig_state_dict[rename_key(key, config)] = val

    return orig_state_dict
def convert_swin2sr_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub):
    config = get_config(checkpoint_url)
    model = Swin2SRForImageSuperResolution(config)
    model.eval()

    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    new_state_dict = convert_state_dict(state_dict, config)
    missing_keys, unexpected_keys = model.load_state_dict(new_state_dict, strict=False)

    if len(missing_keys) > 0:
        raise ValueError("Missing keys when converting: {}".format(missing_keys))
    for key in unexpected_keys:
        if not ("relative_position_index" in key or "relative_coords_table" in key or "self_mask" in key):
            raise ValueError(f"Unexpected key {key} in state_dict")

    # verify values
    url = "https://github.com/mv-lab/swin2sr/blob/main/testsets/real-inputs/shanghai.jpg?raw=true"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    processor = Swin2SRImageProcessor()
    # pixel_values = processor(image, return_tensors="pt").pixel_values

    image_size = 126 if "Jpeg" in checkpoint_url else 256
    transforms = Compose(
        [
            Resize((image_size, image_size)),
            ToTensor(),
            Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ]
    )
    pixel_values = transforms(image).unsqueeze(0)

    if config.num_channels == 1:
        pixel_values = pixel_values[:, 0, :, :].unsqueeze(1)

    outputs = model(pixel_values)

    # assert values
    if "Swin2SR_ClassicalSR_X2_64" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 512, 512])
        expected_slice = torch.tensor(
            [[-0.7087, -0.7138, -0.6721], [-0.8340, -0.8095, -0.7298], [-0.9149, -0.8414, -0.7940]]
        )
    elif "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 1024, 1024])
        expected_slice = torch.tensor(
            [[-0.7775, -0.8105, -0.8933], [-0.7764, -0.8356, -0.9225], [-0.7976, -0.8686, -0.9579]]
        )
    elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
        # TODO values didn't match exactly here
        expected_shape = torch.Size([1, 3, 1024, 1024])
        expected_slice = torch.tensor(
            [[-0.8035, -0.7504, -0.7491], [-0.8538, -0.8124, -0.7782], [-0.8804, -0.8651, -0.8493]]
        )
    elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 512, 512])
        expected_slice = torch.tensor(
            [[-0.7669, -0.8662, -0.8767], [-0.8810, -0.9962, -0.9820], [-0.9340, -1.0322, -1.1149]]
        )
    elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 1024, 1024])
        expected_slice = torch.tensor(
            [[-0.5238, -0.5557, -0.6321], [-0.6016, -0.5903, -0.6391], [-0.6244, -0.6334, -0.6889]]
        )

    assert (
        outputs.reconstruction.shape == expected_shape
    ), f"Shape of reconstruction should be {expected_shape}, but is {outputs.reconstruction.shape}"
    assert torch.allclose(outputs.reconstruction[0, 0, :3, :3], expected_slice, atol=1e-3)
    print("Looks ok!")

    url_to_name = {
        "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth": (
            "swin2SR-classical-sr-x2-64"
        ),
        "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X4_64.pth": (
            "swin2SR-classical-sr-x4-64"
        ),
        "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_CompressedSR_X4_48.pth": (
            "swin2SR-compressed-sr-x4-48"
        ),
        "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_Lightweight_X2_64.pth": (
            "swin2SR-lightweight-x2-64"
        ),
        "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR.pth": (
            "swin2SR-realworld-sr-x4-64-bsrgan-psnr"
        ),
    }
    model_name = url_to_name[checkpoint_url]

    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving image processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model.push_to_hub(f"caidas/{model_name}")
        processor.push_to_hub(f"caidas/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth',
type=str,
help='URL of the original Swin2SR checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument('--push_to_hub', action='store_true', help='Whether to push the converted model to the hub.')
    args = parser.parse_args()
    convert_swin2sr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 57 |
"""simple docstring"""
from __future__ import annotations
class BoyerMooreSearch:
    """Boyer-Moore substring search using the bad character heuristic."""

    def __init__(self, text: str, pattern: str):
        self.text, self.pattern = text, pattern
        self.textLen, self.patLen = len(text), len(pattern)

    def match_in_pattern(self, char: str) -> int:
        """Return the rightmost index of ``char`` in the pattern, or -1 if absent."""
        for i in range(self.patLen - 1, -1, -1):
            if char == self.pattern[i]:
                return i
        return -1

    def mismatch_in_text(self, current_pos: int) -> int:
        """Return the text index of the rightmost mismatch for this alignment, or -1 on a full match."""
        for i in range(self.patLen - 1, -1, -1):
            if self.pattern[i] != self.text[current_pos + i]:
                return current_pos + i
        return -1

    def bad_character_heuristic(self) -> list[int]:
        # searches pattern in text and returns index positions
        positions = []
        for i in range(self.textLen - self.patLen + 1):
            mismatch_index = self.mismatch_in_text(i)
            if mismatch_index == -1:
                positions.append(i)
            else:
                match_index = self.match_in_pattern(self.text[mismatch_index])
                i = (
                    mismatch_index - match_index
                )  # shifting index lgtm [py/multiple-definition]
        return positions
text = 'ABAABA'
pattern = 'AB'

bms = BoyerMooreSearch(text, pattern)
positions = bms.bad_character_heuristic()
if len(positions) == 0:
print('No match found')
else:
print('Pattern found in following positions: ')
print(positions)
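# Expected demo output: "Pattern found in following positions: " followed by [0, 3].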
| 57 | 1 |
from ..utils import DummyObject, requires_backends
class MidiProcessor(metaclass=DummyObject):
    _backends = ["note_seq"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["note_seq"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])
| 26 |
"""simple docstring"""
import os
from distutils.util import strtobool
def get_int_from_env(env_keys, default):
    """Returns the first positive env value found in the `env_keys` list or the default."""
    for e in env_keys:
        val = int(os.environ.get(e, -1))
        if val >= 0:
            return val
    return default


def parse_flag_from_env(key, default=False):
    value = os.environ.get(key, str(default))
    return strtobool(value) == 1  # As its name indicates `strtobool` actually returns an int...


def parse_choice_from_env(key, default="no"):
    value = os.environ.get(key, str(default))
    return value
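# Illustrative usage (environment variable names are placeholders):
#   get_int_from_env(["LOCAL_RANK", "MPI_LOCALRANKID"], 0)  # first non-negative value wins
#   parse_flag_from_env("SOME_DEBUG_FLAG")                  # -> True/False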
| 301 | 0 |
class Matrix:  # Public class to implement a graph
    def __init__(self, row: int, col: int, graph: list[list[bool]]):
        self.ROW = row
        self.COL = col
        self.graph = graph

    def is_safe(self, i: int, j: int, visited: list[list[bool]]) -> bool:
        return (
            0 <= i < self.ROW
            and 0 <= j < self.COL
            and not visited[i][j]
            and self.graph[i][j]
        )

    def diffs(self, i: int, j: int, visited: list[list[bool]]) -> None:
        # Checking all 8 elements surrounding nth element
        row_nbr = [-1, -1, -1, 0, 0, 1, 1, 1]  # Coordinate order
        col_nbr = [-1, 0, 1, -1, 1, -1, 0, 1]
        visited[i][j] = True  # Make those cells visited
        for k in range(8):
            if self.is_safe(i + row_nbr[k], j + col_nbr[k], visited):
                self.diffs(i + row_nbr[k], j + col_nbr[k], visited)

    def count_islands(self) -> int:  # And finally, count all islands.
        visited = [[False for j in range(self.COL)] for i in range(self.ROW)]
        count = 0
        for i in range(self.ROW):
            for j in range(self.COL):
                if visited[i][j] is False and self.graph[i][j] == 1:
                    self.diffs(i, j, visited)
                    count += 1
        return count
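# Illustrative usage: with 8-connectivity, diagonally adjacent cells form one island.
#   Matrix(2, 2, [[1, 0], [0, 1]]).count_islands()  # -> 1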
| 177 |
# This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
ControlNetModel,
DDIMScheduler,
    StableDiffusionControlNetImg2ImgPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel
from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
)
enable_full_determinism()
class ControlNetImg2ImgPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionControlNetImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({"control_image"})
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, )
        torch.manual_seed(0)
        controlnet = ControlNetModel(
            block_out_channels=(32, 64), layers_per_block=2, in_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), cross_attention_dim=32, conditioning_embedding_out_channels=(16, 32), )
        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "controlnet": controlnet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        controlnet_embedder_scale_factor = 2
        control_image = randn_tensor(
            (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor), generator=generator, device=torch.device(device), )
        image = floats_tensor(control_image.shape, rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
            "image": image,
            "control_image": control_image,
        }
        return inputs
    def test_attention_slicing_forward_pass(self):
        return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=2e-3)
class StableDiffusionMultiControlNetPipelineFastTests(
    PipelineTesterMixin, PipelineKarrasSchedulerTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionControlNetImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset([])  # TO_DO: add image_params once refactored VaeImageProcessor.preprocess
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, )
        torch.manual_seed(0)

        def init_weights(m):
            if isinstance(m, torch.nn.Conv2d):
                torch.nn.init.normal_(m.weight)
                m.bias.data.fill_(1.0)

        controlnet1 = ControlNetModel(
            block_out_channels=(32, 64), layers_per_block=2, in_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), cross_attention_dim=32, conditioning_embedding_out_channels=(16, 32), )
        controlnet1.controlnet_down_blocks.apply(init_weights)

        torch.manual_seed(0)
        controlnet2 = ControlNetModel(
            block_out_channels=(32, 64), layers_per_block=2, in_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), cross_attention_dim=32, conditioning_embedding_out_channels=(16, 32), )
        controlnet2.controlnet_down_blocks.apply(init_weights)

        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        controlnet = MultiControlNetModel([controlnet1, controlnet2])

        components = {
            "unet": unet,
            "controlnet": controlnet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        controlnet_embedder_scale_factor = 2
        control_image = [
            randn_tensor(
                (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor), generator=generator, device=torch.device(device), ),
            randn_tensor(
                (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor), generator=generator, device=torch.device(device), ),
        ]
        image = floats_tensor(control_image[0].shape, rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
            "image": image,
            "control_image": control_image,
        }
        return inputs
    def test_control_guidance_switch(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)

        scale = 10.0
        steps = 4

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["controlnet_conditioning_scale"] = scale
        output_1 = pipe(**inputs)[0]

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["controlnet_conditioning_scale"] = scale
        output_2 = pipe(**inputs, control_guidance_start=0.1, control_guidance_end=0.2)[0]

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["controlnet_conditioning_scale"] = scale
        output_3 = pipe(**inputs, control_guidance_start=[0.1, 0.3], control_guidance_end=[0.2, 0.7])[0]

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["controlnet_conditioning_scale"] = scale
        output_4 = pipe(**inputs, control_guidance_start=0.4, control_guidance_end=[0.5, 0.8])[0]

        # make sure that all outputs are different
        assert np.sum(np.abs(output_1 - output_2)) > 1e-3
        assert np.sum(np.abs(output_1 - output_3)) > 1e-3
        assert np.sum(np.abs(output_1 - output_4)) > 1e-3
    def test_attention_slicing_forward_pass(self):
        return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=2e-3)

    def test_save_pretrained_raise_not_implemented_exception(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        with tempfile.TemporaryDirectory() as tmpdir:
            try:
                # save_pretrained is not implemented for Multi-ControlNet
                pipe.save_pretrained(tmpdir)
            except NotImplementedError:
                pass
@slow
@require_torch_gpu
class ControlNetImg2ImgPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_canny(self):
        controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny")

        pipe = StableDiffusionControlNetImg2ImgPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet
        )
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        prompt = "evil space-punk bird"
        control_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png"
        ).resize((512, 512))
        image = load_image(
            "https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png"
        ).resize((512, 512))

        output = pipe(
            prompt, image, control_image=control_image, generator=generator, output_type="np", num_inference_steps=50, strength=0.6, )
        image = output.images[0]

        assert image.shape == (512, 512, 3)

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy"
        )
        assert np.abs(expected_image - image).max() < 9e-2
| 177 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_funnel": ["FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP", "FunnelConfig"],
    "convert_funnel_original_tf_checkpoint_to_pytorch": [],
    "tokenization_funnel": ["FunnelTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["tokenization_funnel_fast"] = ["FunnelTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_funnel"] = [
"""FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FunnelBaseModel""",
"""FunnelForMaskedLM""",
"""FunnelForMultipleChoice""",
"""FunnelForPreTraining""",
"""FunnelForQuestionAnswering""",
"""FunnelForSequenceClassification""",
"""FunnelForTokenClassification""",
"""FunnelModel""",
"""FunnelPreTrainedModel""",
"""load_tf_weights_in_funnel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_tf_funnel"] = [
"""TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFFunnelBaseModel""",
"""TFFunnelForMaskedLM""",
"""TFFunnelForMultipleChoice""",
"""TFFunnelForPreTraining""",
"""TFFunnelForQuestionAnswering""",
"""TFFunnelForSequenceClassification""",
"""TFFunnelForTokenClassification""",
"""TFFunnelModel""",
"""TFFunnelPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
from .tokenization_funnel import FunnelTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_funnel_fast import FunnelTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_funnel import (
FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
FunnelBaseModel,
FunnelForMaskedLM,
FunnelForMultipleChoice,
FunnelForPreTraining,
FunnelForQuestionAnswering,
FunnelForSequenceClassification,
FunnelForTokenClassification,
FunnelModel,
FunnelPreTrainedModel,
load_tf_weights_in_funnel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_funnel import (
TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
TFFunnelPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 20 |
'''simple docstring'''
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import is_accelerate_available, is_torch_available, is_transformers_available, is_xformers_available
from . import BaseDiffusersCLICommand
def info_command_factory(_):
    return EnvironmentCommand()


class EnvironmentCommand(BaseDiffusersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        download_parser = parser.add_parser("env")
        download_parser.set_defaults(func=info_command_factory)

    def run(self):
        hub_version = huggingface_hub.__version__

        pt_version = "not installed"
        pt_cuda_available = "NA"
        if is_torch_available():
            import torch

            pt_version = torch.__version__
            pt_cuda_available = torch.cuda.is_available()

        transformers_version = "not installed"
        if is_transformers_available():
            import transformers

            transformers_version = transformers.__version__

        accelerate_version = "not installed"
        if is_accelerate_available():
            import accelerate

            accelerate_version = accelerate.__version__

        xformers_version = "not installed"
        if is_xformers_available():
            import xformers

            xformers_version = xformers.__version__

        info = {
            "`diffusers` version": version,
            "Platform": platform.platform(),
            "Python version": platform.python_version(),
            "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})",
            "Huggingface_hub version": hub_version,
            "Transformers version": transformers_version,
            "Accelerate version": accelerate_version,
            "xFormers version": xformers_version,
            "Using GPU in script?": "<fill in>",
            "Using distributed or parallel set-up in script?": "<fill in>",
        }

        print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n")
        print(self.format_dict(info))

        return info

    @staticmethod
    def format_dict(d):
        return "\n".join([f"- {prop}: {val}" for prop, val in d.items()]) + "\n"
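# Typical invocation (illustrative): `diffusers-cli env` runs this command and prints the
# version table above for inclusion in a GitHub issue.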
| 85 | 0 |
'''simple docstring'''
import flax.linen as nn
import jax.numpy as jnp
from .attention_flax import FlaxTransformer2DModel
from .resnet_flax import FlaxDownsample2D, FlaxResnetBlock2D, FlaxUpsample2D
class FlaxCrossAttnDownBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_downsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
        attentions = []

        for i in range(self.num_layers):
            in_channels = self.in_channels if i == 0 else self.out_channels

            res_block = FlaxResnetBlock2D(
                in_channels=in_channels, out_channels=self.out_channels, dropout_prob=self.dropout, dtype=self.dtype, )
            resnets.append(res_block)

            attn_block = FlaxTransformer2DModel(
                in_channels=self.out_channels, n_heads=self.num_attention_heads, d_head=self.out_channels // self.num_attention_heads, depth=1, use_linear_projection=self.use_linear_projection, only_cross_attention=self.only_cross_attention, use_memory_efficient_attention=self.use_memory_efficient_attention, dtype=self.dtype, )
            attentions.append(attn_block)

        self.resnets = resnets
        self.attentions = attentions

        if self.add_downsample:
            self.downsamplers_0 = FlaxDownsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, temb, encoder_hidden_states, deterministic=True):
        output_states = ()

        for resnet, attn in zip(self.resnets, self.attentions):
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)
            output_states += (hidden_states,)

        if self.add_downsample:
            hidden_states = self.downsamplers_0(hidden_states)
            output_states += (hidden_states,)

        return hidden_states, output_states
class FlaxDownBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    add_downsample: bool = True
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []

        for i in range(self.num_layers):
            in_channels = self.in_channels if i == 0 else self.out_channels

            res_block = FlaxResnetBlock2D(
                in_channels=in_channels, out_channels=self.out_channels, dropout_prob=self.dropout, dtype=self.dtype, )
            resnets.append(res_block)

        self.resnets = resnets

        if self.add_downsample:
            self.downsamplers_0 = FlaxDownsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, temb, deterministic=True):
        output_states = ()

        for resnet in self.resnets:
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            output_states += (hidden_states,)

        if self.add_downsample:
            hidden_states = self.downsamplers_0(hidden_states)
            output_states += (hidden_states,)

        return hidden_states, output_states
class FlaxCrossAttnUpBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_upsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
        attentions = []

        for i in range(self.num_layers):
            res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels

            res_block = FlaxResnetBlock2D(
                in_channels=resnet_in_channels + res_skip_channels, out_channels=self.out_channels, dropout_prob=self.dropout, dtype=self.dtype, )
            resnets.append(res_block)

            attn_block = FlaxTransformer2DModel(
                in_channels=self.out_channels, n_heads=self.num_attention_heads, d_head=self.out_channels // self.num_attention_heads, depth=1, use_linear_projection=self.use_linear_projection, only_cross_attention=self.only_cross_attention, use_memory_efficient_attention=self.use_memory_efficient_attention, dtype=self.dtype, )
            attentions.append(attn_block)

        self.resnets = resnets
        self.attentions = attentions

        if self.add_upsample:
            self.upsamplers_0 = FlaxUpsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, res_hidden_states_tuple, temb, encoder_hidden_states, deterministic=True):
        for resnet, attn in zip(self.resnets, self.attentions):
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = jnp.concatenate((hidden_states, res_hidden_states), axis=-1)

            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)

        if self.add_upsample:
            hidden_states = self.upsamplers_0(hidden_states)

        return hidden_states
class FlaxUpBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    add_upsample: bool = True
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []

        for i in range(self.num_layers):
            res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels

            res_block = FlaxResnetBlock2D(
                in_channels=resnet_in_channels + res_skip_channels, out_channels=self.out_channels, dropout_prob=self.dropout, dtype=self.dtype, )
            resnets.append(res_block)

        self.resnets = resnets

        if self.add_upsample:
            self.upsamplers_0 = FlaxUpsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, res_hidden_states_tuple, temb, deterministic=True):
        for resnet in self.resnets:
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = jnp.concatenate((hidden_states, res_hidden_states), axis=-1)

            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)

        if self.add_upsample:
            hidden_states = self.upsamplers_0(hidden_states)

        return hidden_states
class FlaxUNetMidBlock2DCrossAttn(nn.Module):
    in_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    use_linear_projection: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        # there is always at least one resnet
        resnets = [
            FlaxResnetBlock2D(
                in_channels=self.in_channels, out_channels=self.in_channels, dropout_prob=self.dropout, dtype=self.dtype, )
        ]

        attentions = []

        for _ in range(self.num_layers):
            attn_block = FlaxTransformer2DModel(
                in_channels=self.in_channels, n_heads=self.num_attention_heads, d_head=self.in_channels // self.num_attention_heads, depth=1, use_linear_projection=self.use_linear_projection, use_memory_efficient_attention=self.use_memory_efficient_attention, dtype=self.dtype, )
            attentions.append(attn_block)

            res_block = FlaxResnetBlock2D(
                in_channels=self.in_channels, out_channels=self.in_channels, dropout_prob=self.dropout, dtype=self.dtype, )
            resnets.append(res_block)

        self.resnets = resnets
        self.attentions = attentions

    def __call__(self, hidden_states, temb, encoder_hidden_states, deterministic=True):
        hidden_states = self.resnets[0](hidden_states, temb)
        for attn, resnet in zip(self.attentions, self.resnets[1:]):
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)

        return hidden_states
| 364 |
'''simple docstring'''
import logging
from transformers.configuration_utils import PretrainedConfig
logger = logging.getLogger(__name__)
class MaskedBertConfig(PretrainedConfig):
    """A class replicating the BERT configuration with additional parameters for pruning/masking."""

    model_type = "masked_bert"

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, pruning_method="topK", mask_init="constant", mask_scale=0.0, **kwargs):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.pruning_method = pruning_method
        self.mask_init = mask_init
        self.mask_scale = mask_scale
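# Illustrative: MaskedBertConfig(pruning_method="sigmoied_threshold") configures soft movement pruning,
# mirroring the methods handled by the pruning script in the next snippet.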
| 345 | 0 |
import argparse
import os
import shutil
import torch
from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer
def main(args):
    pruning_method = args.pruning_method
    threshold = args.threshold

    model_name_or_path = args.model_name_or_path.rstrip("/")
    target_model_path = args.target_model_path

    print(f"Load fine-pruned model from {model_name_or_path}")
    model = torch.load(os.path.join(model_name_or_path, "pytorch_model.bin"))
    pruned_model = {}

    for name, tensor in model.items():
        if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        elif "classifier" in name or "qa_output" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        elif "bias" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        else:
            if pruning_method == "magnitude":
                mask = MagnitudeBinarizer.apply(inputs=tensor, threshold=threshold)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "topK":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                mask = TopKBinarizer.apply(scores, threshold)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "sigmoied_threshold":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                mask = ThresholdBinarizer.apply(scores, threshold, True)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "l0":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                l, r = -0.1, 1.1
                s = torch.sigmoid(scores)
                s_bar = s * (r - l) + l
                mask = s_bar.clamp(min=0.0, max=1.0)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            else:
                raise ValueError("Unknown pruning method")

    if target_model_path is None:
        target_model_path = os.path.join(
            os.path.dirname(model_name_or_path), f"bertarized_{os.path.basename(model_name_or_path)}"
        )

    if not os.path.isdir(target_model_path):
        shutil.copytree(model_name_or_path, target_model_path)
        print(f"\nCreated folder {target_model_path}")

    torch.save(pruned_model, os.path.join(target_model_path, "pytorch_model.bin"))
    print("\nPruned model saved! See you later!")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"""--pruning_method""",
choices=["""l0""", """magnitude""", """topK""", """sigmoied_threshold"""],
type=str,
required=True,
help=(
"""Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,"""
""" sigmoied_threshold = Soft movement pruning)"""
),
)
parser.add_argument(
"""--threshold""",
type=float,
required=False,
help=(
"""For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model."""
"""For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared."""
"""Not needed for `l0`"""
),
)
parser.add_argument(
"""--model_name_or_path""",
type=str,
required=True,
help="""Folder containing the model that was previously fine-pruned""",
)
parser.add_argument(
"""--target_model_path""",
default=None,
type=str,
required=False,
help="""Folder containing the model that was previously fine-pruned""",
)
args = parser.parse_args()
main(args)
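# Illustrative invocation (script name and paths are placeholders):
#   python bertarize.py --pruning_method topK --threshold 0.10 --model_name_or_path /path/to/fine_pruned_model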
| 245 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/nllb-moe-54B": "https://huggingface.co/facebook/nllb-moe-54b/resolve/main/config.json",
}
class NllbMoeConfig(PretrainedConfig):
    model_type = "nllb-moe"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
    def __init__(self, vocab_size=128112, max_position_embeddings=1024, encoder_layers=12, encoder_ffn_dim=4096, encoder_attention_heads=16, decoder_layers=12, decoder_ffn_dim=4096, decoder_attention_heads=16, encoder_layerdrop=0.05, decoder_layerdrop=0.05, use_cache=True, is_encoder_decoder=True, activation_function="relu", d_model=1024, dropout=0.1, attention_dropout=0.1, activation_dropout=0.0, init_std=0.02, decoder_start_token_id=2, scale_embedding=True, router_bias=False, router_dtype="float32", router_ignore_padding_tokens=False, num_experts=128, expert_capacity=64, encoder_sparse_step=4, decoder_sparse_step=4, router_z_loss_coef=0.001, router_aux_loss_coef=0.001, second_expert_policy="all", normalize_router_prob_before_dropping=False, batch_prioritized_routing=False, moe_eval_capacity_token_fraction=1.0, moe_token_dropout=0.2, pad_token_id=1, bos_token_id=0, eos_token_id=2, output_router_logits=False, **kwargs):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef
        self.decoder_sparse_step = decoder_sparse_step
        self.encoder_sparse_step = encoder_sparse_step
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}")
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.batch_prioritized_routing = batch_prioritized_routing
        self.second_expert_policy = second_expert_policy
        self.normalize_router_prob_before_dropping = normalize_router_prob_before_dropping
        self.moe_eval_capacity_token_fraction = moe_eval_capacity_token_fraction
        self.moe_token_dropout = moe_token_dropout
        self.output_router_logits = output_router_logits
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id, **kwargs, )
| 245 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {
'''configuration_longt5''': ['''LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LongT5Config''', '''LongT5OnnxConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_longt5"] = [
'''LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''LongT5EncoderModel''',
'''LongT5ForConditionalGeneration''',
'''LongT5Model''',
'''LongT5PreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_longt5"] = [
'''FlaxLongT5ForConditionalGeneration''',
'''FlaxLongT5Model''',
'''FlaxLongT5PreTrainedModel''',
]
if TYPE_CHECKING:
    from .configuration_longt5 import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongT5Config, LongT5OnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_longt5 import (
            LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            LongT5EncoderModel,
            LongT5ForConditionalGeneration,
            LongT5Model,
            LongT5PreTrainedModel,
        )
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_flax_longt5 import (
            FlaxLongT5ForConditionalGeneration,
            FlaxLongT5Model,
            FlaxLongT5PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
 | 64 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def compare_string(string1: str, string2: str) -> str | Literal[False]:
    """Merge two binary strings differing in at most one position; return False otherwise."""
    list1 = list(string1)
    list2 = list(string2)
    count = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count += 1
            list1[i] = "_"
    if count > 1:
        return False
    else:
        return "".join(list1)
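# For example, compare_string("0110", "0111") merges to "011_", while
# compare_string("0110", "1001") differs in every position and returns False.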
def check(binary: list[str]) -> list[str]:
    """Derive the prime implicants from the binary minterm representations."""
    pi = []
    while True:
        check1 = ["$"] * len(binary)
        temp = []
        for i in range(len(binary)):
            for j in range(i + 1, len(binary)):
                k = compare_string(binary[i], binary[j])
                if k is False:
                    check1[i] = "*"
                    check1[j] = "*"
                    temp.append("X")
        for i in range(len(binary)):
            if check1[i] == "$":
                pi.append(binary[i])
        if len(temp) == 0:
            return pi
        binary = list(set(temp))
def decimal_to_binary(no_of_variable: int, minterms: Sequence[float]) -> list[str]:
    temp = []
    for minterm in minterms:
        string = ""
        for _ in range(no_of_variable):
            string = str(minterm % 2) + string
            minterm //= 2
        temp.append(string)
    return temp
def is_for_table(string1: str, string2: str, count: int) -> bool:
    list1 = list(string1)
    list2 = list(string2)
    count_n = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count_n += 1
    return count_n == count
def selection(chart: list[list[int]], prime_implicants: list[str]) -> list[str]:
    temp = []
    select = [0] * len(chart)
    for i in range(len(chart[0])):
        count = 0
        rem = -1
        for j in range(len(chart)):
            if chart[j][i] == 1:
                count += 1
                rem = j
        if count == 1:
            select[rem] = 1
    for i in range(len(select)):
        if select[i] == 1:
            for j in range(len(chart[0])):
                if chart[i][j] == 1:
                    for k in range(len(chart)):
                        chart[k][j] = 0
            temp.append(prime_implicants[i])
    while True:
        max_n = 0
        rem = -1
        count_n = 0
        for i in range(len(chart)):
            count_n = chart[i].count(1)
            if count_n > max_n:
                max_n = count_n
                rem = i
        if max_n == 0:
            return temp
        temp.append(prime_implicants[rem])
        for i in range(len(chart[0])):
            if chart[rem][i] == 1:
                for j in range(len(chart)):
                    chart[j][i] = 0
def prime_implicant_chart( prime_implicants: list[str] , binary: list[str] ) -> list[list[int]]:
    '''simple docstring'''
    chart = [[0 for x in range(len(binary ) )] for x in range(len(prime_implicants ) )]
    for i in range(len(prime_implicants ) ):
        count = prime_implicants[i].count("_" )
        for j in range(len(binary ) ):
            if is_for_table(prime_implicants[i] , binary[j] , count ):
                chart[i][j] = 1
    return chart
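# Illustrative trace (ordering of set() output may vary): for the two-variable
# minterms {0, 1, 3}, decimal_to_binary(2, [0.0, 1.0, 3.0]) yields ['00', '01', '11'],
# check(...) merges them into the prime implicants ['0_', '_1'], and selection(...)
# over the chart returns both, since each covers a minterm the other does not.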
def main() -> None:
    '''simple docstring'''
    no_of_variable = int(input("Enter the no. of variables\n" ) )
    minterms = [
        float(x )
        for x in input(
            "Enter the decimal representation of Minterms 'Spaces Separated'\n" ).split()
    ]
    binary = decimal_to_binary(no_of_variable , minterms )
    prime_implicants = check(binary )
    print("Prime Implicants are:" )
    print(prime_implicants )
    chart = prime_implicant_chart(prime_implicants , binary )
    essential_prime_implicants = selection(chart , prime_implicants )
    print("Essential Prime Implicants are:" )
    print(essential_prime_implicants )
if __name__ == "__main__":
import doctest
doctest.testmod()
main() | 64 | 1 |
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
# Register SEW's fairseq modules
from sew_asapp import tasks # noqa: F401
from transformers import (
SEWConfig,
SEWForCTC,
SEWModel,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
'post_extract_proj': 'feature_projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.upsample.0': 'encoder.upsample.projection',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'layer_norm',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
}
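# MAPPING translates fairseq parameter-name fragments (keys) into their Hugging Face
# SEW counterparts (values); the "*" wildcard stands for a layer index that is filled
# in while the fairseq state dict is copied over below.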
def set_recursively( hf_pointer , key , value , full_name , weight_type ):
    """simple docstring"""
    for attribute in key.split("." ):
        hf_pointer = getattr(hf_pointer , attribute )
    if weight_type is not None:
        hf_shape = getattr(hf_pointer , weight_type ).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        F'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
        F''' {value.shape} for {full_name}'''
    )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
logger.info(F'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' )
def recursively_load_weights( fairseq_model , hf_model , is_finetuned ):
    """simple docstring"""
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name , value , feature_extractor , unused_weights , hf_model.config.feat_extract_norm == "group" , )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "sew." + mapped_key if (is_finetuned and mapped_key != "lm_head") else mapped_key
                if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key )[0].split("." )[-2]
                        mapped_key = mapped_key.replace("*" , layer_index )
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "bias" in name:
                        weight_type = "bias"
                    else:
                        weight_type = None
                    set_recursively(hf_model , mapped_key , value , name , weight_type )
                continue
        if not is_used:
            unused_weights.append(name )
    logger.warning(F'''Unused weights: {unused_weights}''' )
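# load_conv_layer copies a single conv-feature-extractor tensor: type_id 0 addresses
# the conv weight/bias of `layer_id`, while type_id 2 addresses its layer norm
# (the group norm, which only exists on layer 0, when use_group_norm is set).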
def load_conv_layer( full_name , value , feature_extractor , unused_weights , use_group_norm ):
    """simple docstring"""
    name = full_name.split("conv_layers." )[-1]
    items = name.split("." )
    layer_id = int(items[0] )
    type_id = int(items[1] )
    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                F'''{full_name} has size {value.shape}, but'''
                F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                F'''{full_name} has size {value.shape}, but'''
                F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                F'''{full_name} has size {value.shape}, but {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was'''
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                F'''{full_name} has size {value.shape}, but'''
                F''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.'''
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
    else:
        unused_weights.append(full_name )
def convert_config( model , is_finetuned ):
"""simple docstring"""
a_ = SEWConfig()
if is_finetuned:
a_ = model.wav_encoder.wav_model.cfg
else:
a_ = model.cfg
a_ = fs_config.conv_bias
a_ = eval(fs_config.conv_feature_layers )
a_ = [x[0] for x in conv_layers]
a_ = [x[1] for x in conv_layers]
a_ = [x[2] for x in conv_layers]
a_ = "gelu"
a_ = "layer" if fs_config.extractor_mode == "layer_norm" else "group"
a_ = 0.0
a_ = fs_config.activation_fn.name
a_ = fs_config.encoder_embed_dim
a_ = 0.02
a_ = fs_config.encoder_ffn_embed_dim
a_ = 1E-5
a_ = fs_config.encoder_layerdrop
a_ = fs_config.encoder_attention_heads
a_ = fs_config.conv_pos_groups
a_ = fs_config.conv_pos
a_ = len(SCREAMING_SNAKE_CASE__ )
a_ = fs_config.encoder_layers
a_ = fs_config.squeeze_factor
# take care of any params that are overridden by the Wav2VecCtc model
if is_finetuned:
a_ = model.cfg
a_ = fs_config.final_dropout
a_ = fs_config.layerdrop
a_ = fs_config.activation_dropout
a_ = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0
a_ = fs_config.attention_dropout
a_ = fs_config.dropout_input
a_ = fs_config.dropout
a_ = fs_config.mask_channel_length
a_ = fs_config.mask_channel_prob
a_ = fs_config.mask_length
a_ = fs_config.mask_prob
a_ = "Wav2Vec2FeatureExtractor"
a_ = "Wav2Vec2CTCTokenizer"
return config
@torch.no_grad()
def convert_sew_checkpoint( checkpoint_path , pytorch_dump_folder_path , config_path=None , dict_path=None , is_finetuned=True ):
"""simple docstring"""
if is_finetuned:
a_ , a_ , a_ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} )
else:
a_ , a_ , a_ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
if config_path is not None:
a_ = SEWConfig.from_pretrained(SCREAMING_SNAKE_CASE__ )
else:
a_ = convert_config(model[0] , SCREAMING_SNAKE_CASE__ )
a_ = model[0].eval()
a_ = True if config.feat_extract_norm == "layer" else False
a_ = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=SCREAMING_SNAKE_CASE__ , return_attention_mask=SCREAMING_SNAKE_CASE__ , )
if is_finetuned:
if dict_path:
a_ = Dictionary.load(SCREAMING_SNAKE_CASE__ )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
a_ = target_dict.pad_index
a_ = target_dict.bos_index
a_ = target_dict.pad_index
a_ = target_dict.bos_index
a_ = target_dict.eos_index
a_ = len(target_dict.symbols )
a_ = os.path.join(SCREAMING_SNAKE_CASE__ , "vocab.json" )
if not os.path.isdir(SCREAMING_SNAKE_CASE__ ):
logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(SCREAMING_SNAKE_CASE__ ) )
return
os.makedirs(SCREAMING_SNAKE_CASE__ , exist_ok=SCREAMING_SNAKE_CASE__ )
with open(SCREAMING_SNAKE_CASE__ , "w" , encoding="utf-8" ) as vocab_handle:
json.dump(target_dict.indices , SCREAMING_SNAKE_CASE__ )
a_ = WavaVecaCTCTokenizer(
SCREAMING_SNAKE_CASE__ , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="|" , do_lower_case=SCREAMING_SNAKE_CASE__ , )
a_ = WavaVecaProcessor(feature_extractor=SCREAMING_SNAKE_CASE__ , tokenizer=SCREAMING_SNAKE_CASE__ )
processor.save_pretrained(SCREAMING_SNAKE_CASE__ )
a_ = SEWForCTC(SCREAMING_SNAKE_CASE__ )
else:
a_ = SEWModel(SCREAMING_SNAKE_CASE__ )
feature_extractor.save_pretrained(SCREAMING_SNAKE_CASE__ )
recursively_load_weights(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
hf_model.save_pretrained(SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--is_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
UpperCamelCase_ = parser.parse_args()
convert_sew_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned
) | 243 |
import unittest
from transformers.testing_utils import CaptureStdout
from transformers.tools.python_interpreter import evaluate
def add_two(x ):
    return x + 2
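# Exposed to the sandboxed interpreter in the tests below as the "add_two" tool.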
class snake_case_ ( unittest.TestCase ):
'''simple docstring'''
def snake_case__( self : Optional[Any] ) ->int:
snake_case_ = '''x = 3'''
snake_case_ = {}
snake_case_ = evaluate(_UpperCamelCase , {} , state=_UpperCamelCase )
assert result == 3
self.assertDictEqual(_UpperCamelCase , {'''x''': 3} )
snake_case_ = '''x = y'''
snake_case_ = {'''y''': 5}
snake_case_ = evaluate(_UpperCamelCase , {} , state=_UpperCamelCase )
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(_UpperCamelCase , {'''x''': 5, '''y''': 5} )
def snake_case__( self : Dict ) ->Optional[int]:
snake_case_ = '''y = add_two(x)'''
snake_case_ = {'''x''': 3}
snake_case_ = evaluate(_UpperCamelCase , {'''add_two''': add_two} , state=_UpperCamelCase )
assert result == 5
self.assertDictEqual(_UpperCamelCase , {'''x''': 3, '''y''': 5} )
# Won't work without the tool
with CaptureStdout() as out:
snake_case_ = evaluate(_UpperCamelCase , {} , state=_UpperCamelCase )
assert result is None
assert "tried to execute add_two" in out.out
def snake_case__( self : Union[str, Any] ) ->Dict:
snake_case_ = '''x = 3'''
snake_case_ = {}
snake_case_ = evaluate(_UpperCamelCase , {} , state=_UpperCamelCase )
assert result == 3
self.assertDictEqual(_UpperCamelCase , {'''x''': 3} )
def snake_case__( self : Optional[int] ) ->Optional[int]:
snake_case_ = '''test_dict = {\'x\': x, \'y\': add_two(x)}'''
snake_case_ = {'''x''': 3}
snake_case_ = evaluate(_UpperCamelCase , {'''add_two''': add_two} , state=_UpperCamelCase )
self.assertDictEqual(_UpperCamelCase , {'''x''': 3, '''y''': 5} )
self.assertDictEqual(_UpperCamelCase , {'''x''': 3, '''test_dict''': {'''x''': 3, '''y''': 5}} )
def snake_case__( self : Dict ) ->str:
snake_case_ = '''x = 3\ny = 5'''
snake_case_ = {}
snake_case_ = evaluate(_UpperCamelCase , {} , state=_UpperCamelCase )
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(_UpperCamelCase , {'''x''': 3, '''y''': 5} )
def snake_case__( self : str ) ->Tuple:
snake_case_ = '''text = f\'This is x: {x}.\''''
snake_case_ = {'''x''': 3}
snake_case_ = evaluate(_UpperCamelCase , {} , state=_UpperCamelCase )
# evaluate returns the value of the last assignment.
assert result == "This is x: 3."
self.assertDictEqual(_UpperCamelCase , {'''x''': 3, '''text''': '''This is x: 3.'''} )
def snake_case__( self : Optional[Any] ) ->List[str]:
snake_case_ = '''if x <= 3:\n y = 2\nelse:\n y = 5'''
snake_case_ = {'''x''': 3}
snake_case_ = evaluate(_UpperCamelCase , {} , state=_UpperCamelCase )
# evaluate returns the value of the last assignment.
assert result == 2
self.assertDictEqual(_UpperCamelCase , {'''x''': 3, '''y''': 2} )
snake_case_ = {'''x''': 8}
snake_case_ = evaluate(_UpperCamelCase , {} , state=_UpperCamelCase )
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(_UpperCamelCase , {'''x''': 8, '''y''': 5} )
def snake_case__( self : str ) ->str:
snake_case_ = '''test_list = [x, add_two(x)]'''
snake_case_ = {'''x''': 3}
snake_case_ = evaluate(_UpperCamelCase , {'''add_two''': add_two} , state=_UpperCamelCase )
self.assertListEqual(_UpperCamelCase , [3, 5] )
self.assertDictEqual(_UpperCamelCase , {'''x''': 3, '''test_list''': [3, 5]} )
def snake_case__( self : Any ) ->List[Any]:
snake_case_ = '''y = x'''
snake_case_ = {'''x''': 3}
snake_case_ = evaluate(_UpperCamelCase , {} , state=_UpperCamelCase )
assert result == 3
self.assertDictEqual(_UpperCamelCase , {'''x''': 3, '''y''': 3} )
def snake_case__( self : Optional[int] ) ->Dict:
snake_case_ = '''test_list = [x, add_two(x)]\ntest_list[1]'''
snake_case_ = {'''x''': 3}
snake_case_ = evaluate(_UpperCamelCase , {'''add_two''': add_two} , state=_UpperCamelCase )
assert result == 5
self.assertDictEqual(_UpperCamelCase , {'''x''': 3, '''test_list''': [3, 5]} )
snake_case_ = '''test_dict = {\'x\': x, \'y\': add_two(x)}\ntest_dict[\'y\']'''
snake_case_ = {'''x''': 3}
snake_case_ = evaluate(_UpperCamelCase , {'''add_two''': add_two} , state=_UpperCamelCase )
assert result == 5
self.assertDictEqual(_UpperCamelCase , {'''x''': 3, '''test_dict''': {'''x''': 3, '''y''': 5}} )
def snake_case__( self : Optional[Any] ) ->int:
snake_case_ = '''x = 0\nfor i in range(3):\n x = i'''
snake_case_ = {}
snake_case_ = evaluate(_UpperCamelCase , {'''range''': range} , state=_UpperCamelCase )
assert result == 2
self.assertDictEqual(_UpperCamelCase , {'''x''': 2, '''i''': 2} ) | 8 | 0 |
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
check_dummies.PATH_TO_TRANSFORMERS = os.path.join(git_repo_path, 'src', 'transformers')
_UpperCAmelCase = '\n{0} = None\n'
_UpperCAmelCase = '\nclass {0}(metaclass=DummyObject):\n _backends = {1}\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, {1})\n'
_UpperCAmelCase = '\ndef {0}(*args, **kwargs):\n requires_backends({0}, {1})\n'
class snake_case_ ( unittest.TestCase ):
def UpperCAmelCase__ ( self : int )->Optional[int]:
'''simple docstring'''
__lowerCAmelCase : Optional[int] = find_backend(""" _import_structure[\"models.albert\"].append(\"AlbertTokenizerFast\")""" )
self.assertIsNone(_snake_case )
__lowerCAmelCase : List[str] = find_backend(""" if not is_tokenizers_available():""" )
self.assertEqual(_snake_case , """tokenizers""" )
__lowerCAmelCase : Tuple = find_backend(""" if not is_tensorflow_text_available():""" )
self.assertEqual(_snake_case , """tensorflow_text""" )
__lowerCAmelCase : Any = find_backend(""" if not (is_sentencepiece_available() and is_tokenizers_available()):""" )
self.assertEqual(_snake_case , """sentencepiece_and_tokenizers""" )
__lowerCAmelCase : Dict = find_backend(
""" if not (is_sentencepiece_available() and is_tensorflow_text_available()):""" )
self.assertEqual(_snake_case , """sentencepiece_and_tensorflow_text""" )
__lowerCAmelCase : List[Any] = find_backend(
""" if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):""" )
self.assertEqual(_snake_case , """sentencepiece_and_tokenizers_and_vision""" )
def UpperCAmelCase__ ( self : int )->int:
'''simple docstring'''
        objects = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn("""torch""" , objects )
        self.assertIn("""tensorflow_text""" , objects )
        self.assertIn("""sentencepiece_and_tokenizers""" , objects )
# Likewise, we can't assert on the exact content of a key
self.assertIn("""BertModel""" , objects["""torch"""] )
self.assertIn("""TFBertModel""" , objects["""tf"""] )
self.assertIn("""FlaxBertModel""" , objects["""flax"""] )
self.assertIn("""BertModel""" , objects["""torch"""] )
self.assertIn("""TFBertTokenizer""" , objects["""tensorflow_text"""] )
self.assertIn("""convert_slow_tokenizer""" , objects["""sentencepiece_and_tokenizers"""] )
def UpperCAmelCase__ ( self : Dict )->List[str]:
'''simple docstring'''
__lowerCAmelCase : Tuple = create_dummy_object("""CONSTANT""" , """'torch'""" )
self.assertEqual(_snake_case , """\nCONSTANT = None\n""" )
__lowerCAmelCase : Optional[Any] = create_dummy_object("""function""" , """'torch'""" )
self.assertEqual(
_snake_case , """\ndef function(*args, **kwargs):\n requires_backends(function, 'torch')\n""" )
__lowerCAmelCase : Union[str, Any] = """
class FakeClass(metaclass=DummyObject):
_backends = 'torch'
def __init__(self, *args, **kwargs):
requires_backends(self, 'torch')
"""
__lowerCAmelCase : List[str] = create_dummy_object("""FakeClass""" , """'torch'""" )
self.assertEqual(_snake_case , _snake_case )
def UpperCAmelCase__ ( self : Tuple )->str:
'''simple docstring'''
__lowerCAmelCase : Any = """# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends
CONSTANT = None
def function(*args, **kwargs):
requires_backends(function, [\"torch\"])
class FakeClass(metaclass=DummyObject):
_backends = [\"torch\"]
def __init__(self, *args, **kwargs):
requires_backends(self, [\"torch\"])
"""
__lowerCAmelCase : Dict = create_dummy_files({"""torch""": ["""CONSTANT""", """function""", """FakeClass"""]} )
self.assertEqual(dummy_files["""torch"""] , _snake_case ) | 232 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_UpperCAmelCase = logging.get_logger(__name__)
_UpperCAmelCase = {
'microsoft/biogpt': 'https://huggingface.co/microsoft/biogpt/resolve/main/config.json',
# See all BioGPT models at https://huggingface.co/models?filter=biogpt
}
class snake_case_ ( __lowercase ):
A_ = 'biogpt'
def __init__( self : int , _snake_case : Any=42384 , _snake_case : Any=1024 , _snake_case : List[Any]=24 , _snake_case : Any=16 , _snake_case : List[str]=4096 , _snake_case : Dict="gelu" , _snake_case : Tuple=0.1 , _snake_case : str=0.1 , _snake_case : Tuple=1024 , _snake_case : Tuple=0.02 , _snake_case : Tuple=1E-12 , _snake_case : Optional[int]=True , _snake_case : Optional[int]=True , _snake_case : Any=0.0 , _snake_case : Tuple=0.0 , _snake_case : str=1 , _snake_case : Dict=0 , _snake_case : str=2 , **_snake_case : Union[str, Any] , )->Dict:
'''simple docstring'''
__lowerCAmelCase : List[Any] = vocab_size
__lowerCAmelCase : Dict = max_position_embeddings
__lowerCAmelCase : str = hidden_size
__lowerCAmelCase : Dict = num_hidden_layers
__lowerCAmelCase : List[Any] = num_attention_heads
__lowerCAmelCase : Optional[Any] = intermediate_size
__lowerCAmelCase : int = hidden_act
__lowerCAmelCase : Any = hidden_dropout_prob
__lowerCAmelCase : Any = attention_probs_dropout_prob
__lowerCAmelCase : Any = initializer_range
__lowerCAmelCase : int = layer_norm_eps
__lowerCAmelCase : Optional[int] = scale_embedding
__lowerCAmelCase : List[Any] = use_cache
__lowerCAmelCase : str = layerdrop
__lowerCAmelCase : Dict = activation_dropout
super().__init__(pad_token_id=_snake_case , bos_token_id=_snake_case , eos_token_id=_snake_case , **_snake_case ) | 232 | 1 |
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
lowercase_ = 16
lowercase_ = 32
def a__ ( snake_case , snake_case = 16 ):
"""simple docstring"""
    tokenizer = AutoTokenizer.from_pretrained('''bert-base-cased''' )
    datasets = load_dataset('''glue''' , '''mrpc''' )
def tokenize_function(snake_case ):
# max_length=None => use the model max length (it's actually the default)
__SCREAMING_SNAKE_CASE : Tuple = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=snake_case , max_length=snake_case )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function , batched=True , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
    tokenized_datasets = tokenized_datasets.rename_column('''label''' , '''labels''' )
def collate_fn(snake_case ):
# On TPU it's best to pad everything to the same length or training will be very slow.
__SCREAMING_SNAKE_CASE : Optional[Any] = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
__SCREAMING_SNAKE_CASE : Any = 16
elif accelerator.mixed_precision != "no":
__SCREAMING_SNAKE_CASE : Optional[Any] = 8
else:
__SCREAMING_SNAKE_CASE : Union[str, Any] = None
return tokenizer.pad(
snake_case , padding='''longest''' , max_length=snake_case , pad_to_multiple_of=snake_case , return_tensors='''pt''' , )
# Instantiate dataloaders.
__SCREAMING_SNAKE_CASE : Dict = DataLoader(
tokenized_datasets['''train'''] , shuffle=snake_case , collate_fn=snake_case , batch_size=snake_case )
__SCREAMING_SNAKE_CASE : Tuple = DataLoader(
tokenized_datasets['''validation'''] , shuffle=snake_case , collate_fn=snake_case , batch_size=snake_case )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
lowercase_ = mocked_dataloaders # noqa: F811
def a__ ( snake_case , snake_case ):
"""simple docstring"""
# For testing only
if os.environ.get('''TESTING_MOCKED_DATALOADERS''' , snake_case ) == "1":
__SCREAMING_SNAKE_CASE : str = 2
# Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['''lr''']
    num_epochs = int(config['''num_epochs'''] )
    seed = int(config['''seed'''] )
    batch_size = int(config['''batch_size'''] )
    metric = evaluate.load('''glue''' , '''mrpc''' )
# New Code #
# We now can define an inner training loop function. It should take a batch size as the only parameter,
# and build the dataloaders in there.
# It also gets our decorator
@find_executable_batch_size(starting_batch_size=snake_case )
def inner_training_loop(snake_case ):
# And now just move everything below under this function
# We need to bring in the Accelerator object from earlier
nonlocal accelerator
# And reset all of its attributes that could hold onto any memory:
accelerator.free_memory()
# Then we can declare the model, optimizer, and everything else:
set_seed(snake_case )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
__SCREAMING_SNAKE_CASE : List[Any] = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' , return_dict=snake_case )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
__SCREAMING_SNAKE_CASE : int = model.to(accelerator.device )
# Instantiate optimizer
__SCREAMING_SNAKE_CASE : Any = AdamW(params=model.parameters() , lr=snake_case )
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : str = get_dataloaders(snake_case , snake_case )
# Instantiate scheduler
__SCREAMING_SNAKE_CASE : Tuple = get_linear_schedule_with_warmup(
optimizer=snake_case , num_warmup_steps=100 , num_training_steps=(len(snake_case ) * num_epochs) , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Tuple = accelerator.prepare(
snake_case , snake_case , snake_case , snake_case , snake_case )
# Now we train the model
for epoch in range(snake_case ):
model.train()
for step, batch in enumerate(snake_case ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
__SCREAMING_SNAKE_CASE : str = model(**snake_case )
__SCREAMING_SNAKE_CASE : List[str] = outputs.loss
accelerator.backward(snake_case )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(snake_case ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
__SCREAMING_SNAKE_CASE : List[Any] = model(**snake_case )
__SCREAMING_SNAKE_CASE : Tuple = outputs.logits.argmax(dim=-1 )
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Optional[Any] = accelerator.gather_for_metrics((predictions, batch['''labels''']) )
metric.add_batch(
predictions=snake_case , references=snake_case , )
__SCREAMING_SNAKE_CASE : List[Any] = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F'''epoch {epoch}:''' , snake_case )
# New Code #
# And call it at the end with no arguments
# Note: You could also refactor this outside of your training loop function
inner_training_loop()
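    # find_executable_batch_size re-runs inner_training_loop whenever it raises a CUDA
    # out-of-memory error, halving `batch_size` (128 -> 64 -> 32 -> ...) until it fits.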
def a__ ( ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : List[str] = argparse.ArgumentParser(description='''Simple example of training script.''' )
parser.add_argument(
'''--mixed_precision''' , type=snake_case , default=snake_case , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose'''
'''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'''
'''and an Nvidia Ampere GPU.''' , )
parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' )
__SCREAMING_SNAKE_CASE : str = parser.parse_args()
__SCREAMING_SNAKE_CASE : Optional[int] = {'''lr''': 2E-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16}
training_function(snake_case , snake_case )
if __name__ == "__main__":
main()
| 303 |
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
| 303 | 1 |
"""simple docstring"""
import math
from typing import Any, Callable, List, Optional, Tuple, Union
import numpy as np
import torch
from ...models import TaFilmDecoder
from ...schedulers import DDPMScheduler
from ...utils import is_onnx_available, logging, randn_tensor
if is_onnx_available():
from ..onnx_utils import OnnxRuntimeModel
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
from .continous_encoder import SpectrogramContEncoder
from .notes_encoder import SpectrogramNotesEncoder
a_ = logging.get_logger(__name__) # pylint: disable=invalid-name
a_ = 256
class __snake_case ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = ["""melgan"""]
def __init__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , ):
'''simple docstring'''
super().__init__()
# From MELGAN
__A : List[Any] = math.log(1e-5 ) # Matches MelGAN training.
__A : Any = 4.0 # Largest value for most examples
__A : Tuple = 128
self.register_modules(
notes_encoder=__lowerCamelCase , continuous_encoder=__lowerCamelCase , decoder=__lowerCamelCase , scheduler=__lowerCamelCase , melgan=__lowerCamelCase , )
def UpperCamelCase__( self , __lowerCamelCase , __lowerCamelCase=(-1.0, 1.0) , __lowerCamelCase=False ):
'''simple docstring'''
__A : str = output_range
if clip:
__A : Optional[Any] = torch.clip(__lowerCamelCase , self.min_value , self.max_value )
# Scale to [0, 1].
__A : Union[str, Any] = (features - self.min_value) / (self.max_value - self.min_value)
# Scale to [min_out, max_out].
return zero_one * (max_out - min_out) + min_out
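        # Example: with output_range=(-1.0, 1.0), self.min_value maps to -1.0 and
        # self.max_value to 1.0; scale_to_features below applies the inverse mapping.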
def UpperCamelCase__( self , __lowerCamelCase , __lowerCamelCase=(-1.0, 1.0) , __lowerCamelCase=False ):
'''simple docstring'''
__A : List[Any] = input_range
__A : Optional[Any] = torch.clip(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) if clip else outputs
# Scale to [0, 1].
__A : Any = (outputs - min_out) / (max_out - min_out)
# Scale to [self.min_value, self.max_value].
return zero_one * (self.max_value - self.min_value) + self.min_value
def UpperCamelCase__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ):
'''simple docstring'''
__A : Tuple = input_tokens > 0
__A : Dict = self.notes_encoder(
encoder_input_tokens=__lowerCamelCase , encoder_inputs_mask=__lowerCamelCase )
__A : Tuple = self.continuous_encoder(
encoder_inputs=__lowerCamelCase , encoder_inputs_mask=__lowerCamelCase )
return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)]
def UpperCamelCase__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ):
'''simple docstring'''
__A : Tuple = noise_time
if not torch.is_tensor(__lowerCamelCase ):
__A : str = torch.tensor([timesteps] , dtype=torch.long , device=input_tokens.device )
elif torch.is_tensor(__lowerCamelCase ) and len(timesteps.shape ) == 0:
__A : int = timesteps[None].to(input_tokens.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
__A : Union[str, Any] = timesteps * torch.ones(input_tokens.shape[0] , dtype=timesteps.dtype , device=timesteps.device )
__A : str = self.decoder(
encodings_and_masks=__lowerCamelCase , decoder_input_tokens=__lowerCamelCase , decoder_noise_time=__lowerCamelCase )
return logits
@torch.no_grad()
def __call__( self , __lowerCamelCase , __lowerCamelCase = None , __lowerCamelCase = 100 , __lowerCamelCase = True , __lowerCamelCase = "numpy" , __lowerCamelCase = None , __lowerCamelCase = 1 , ):
'''simple docstring'''
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(__lowerCamelCase , __lowerCamelCase ) or callback_steps <= 0)
):
raise ValueError(
F"""`callback_steps` has to be a positive integer but is {callback_steps} of type"""
F""" {type(__lowerCamelCase )}.""" )
__A : Optional[Any] = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims] , dtype=np.floataa )
__A : Tuple = np.zeros([1, 0, self.n_dims] , np.floataa )
__A : str = torch.ones((1, TARGET_FEATURE_LENGTH) , dtype=__lowerCamelCase , device=self.device )
for i, encoder_input_tokens in enumerate(__lowerCamelCase ):
if i == 0:
__A : Union[str, Any] = torch.from_numpy(pred_mel[:1].copy() ).to(
device=self.device , dtype=self.decoder.dtype )
# The first chunk has no previous context.
__A : Tuple = torch.zeros((1, TARGET_FEATURE_LENGTH) , dtype=__lowerCamelCase , device=self.device )
else:
# The full song pipeline does not feed in a context feature, so the mask
# will be all 0s after the feature converter. Because we know we're
# feeding in a full context chunk from the previous prediction, set it
# to all 1s.
__A : Union[str, Any] = ones
__A : Union[str, Any] = self.scale_features(
__lowerCamelCase , output_range=[-1.0, 1.0] , clip=__lowerCamelCase )
__A : Dict = self.encode(
input_tokens=torch.IntTensor([encoder_input_tokens] ).to(device=self.device ) , continuous_inputs=__lowerCamelCase , continuous_mask=__lowerCamelCase , )
# Sample encoder_continuous_inputs shaped gaussian noise to begin loop
__A : List[Any] = randn_tensor(
shape=encoder_continuous_inputs.shape , generator=__lowerCamelCase , device=self.device , dtype=self.decoder.dtype , )
# set step values
self.scheduler.set_timesteps(__lowerCamelCase )
# Denoising diffusion loop
for j, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
__A : Tuple = self.decode(
encodings_and_masks=__lowerCamelCase , input_tokens=__lowerCamelCase , noise_time=t / self.scheduler.config.num_train_timesteps , )
# Compute previous output: x_t -> x_t-1
__A : Optional[Any] = self.scheduler.step(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , generator=__lowerCamelCase ).prev_sample
__A : str = self.scale_to_features(__lowerCamelCase , input_range=[-1.0, 1.0] )
__A : Any = mel[:1]
__A : Dict = mel.cpu().float().numpy()
__A : Any = np.concatenate([full_pred_mel, pred_mel[:1]] , axis=1 )
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(__lowerCamelCase , __lowerCamelCase )
logger.info('''Generated segment''' , __lowerCamelCase )
if output_type == "numpy" and not is_onnx_available():
raise ValueError(
'''Cannot return output in \'np\' format if ONNX is not available. Make sure to have ONNX installed or set \'output_type\' to \'mel\'.''' )
elif output_type == "numpy" and self.melgan is None:
raise ValueError(
'''Cannot return output in \'np\' format if melgan component is not defined. Make sure to define `self.melgan` or set \'output_type\' to \'mel\'.''' )
if output_type == "numpy":
__A : Optional[int] = self.melgan(input_features=full_pred_mel.astype(np.floataa ) )
else:
__A : Union[str, Any] = full_pred_mel
if not return_dict:
return (output,)
return AudioPipelineOutput(audios=__lowerCamelCase )
| 365 |
"""simple docstring"""
from math import factorial
def combinations( n : int , k : int ) -> int:
    '''simple docstring'''
    if n < k or k < 0:
        raise ValueError('''Please enter positive integers for n and k where n >= k''' )
    return factorial(n ) // (factorial(k ) * factorial(n - k ))
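# Sanity checks: combinations(5, 2) == 10 and combinations(52, 5) == 2598960,
# the poker-hand count printed in the demonstration below.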
if __name__ == "__main__":
print(
"""The number of five-card hands possible from a standard""",
f'''fifty-two card deck is: {combinations(52, 5)}\n''',
)
print(
"""If a class of 40 students must be arranged into groups of""",
f'''4 for group projects, there are {combinations(40, 4)} ways''',
"""to arrange them.\n""",
)
print(
"""If 10 teams are competing in a Formula One race, there""",
f'''are {combinations(10, 3)} ways that first, second and''',
"""third place can be awarded.""",
)
| 291 | 0 |
'''simple docstring'''
from __future__ import annotations
def resistor_parallel( resistors: list[float] ) -> float:
    first_sum = 0.00
    index = 0
    for resistor in resistors:
        if resistor <= 0:
            msg = F'''Resistor at index {index} has a negative or zero value!'''
            raise ValueError(msg )
        first_sum += 1 / float(resistor )
        index += 1
    return 1 / first_sum
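# Example: resistor_parallel([6.0, 3.0]) == 2.0, since 1 / (1/6 + 1/3) = 2.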
def resistor_series( resistors: list[float] ) -> float:
    sum_r = 0.00
    index = 0
    for resistor in resistors:
        sum_r += resistor
        if resistor < 0:
            msg = F'''Resistor at index {index} has a negative value!'''
            raise ValueError(msg )
        index += 1
    return sum_r
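# Example: resistor_series([6.0, 3.0]) == 9.0; series resistances simply add.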
if __name__ == "__main__":
import doctest
doctest.testmod()
| 250 |
'''simple docstring'''
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
global_rng = random.Random()
def floats_list( shape , scale=1.0 , rng=None , name=None ):
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0] ):
        values.append([] )
        for _ in range(shape[1] ):
            values[-1].append(rng.random() * scale )
    return values
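# floats_list((rows, cols)) builds a rows x cols nested list of random floats in
# [0, scale), which these tests use as stand-in audio inputs.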
@require_torch
@require_torchaudio
class a__ ( unittest.TestCase ):
def __init__( self , _UpperCamelCase , _UpperCamelCase=7 , _UpperCamelCase=400 , _UpperCamelCase=2000 , _UpperCamelCase=10 , _UpperCamelCase=160 , _UpperCamelCase=8 , _UpperCamelCase=0.0 , _UpperCamelCase=4000 , _UpperCamelCase=False , _UpperCamelCase=True , ):
"""simple docstring"""
_lowercase : int = parent
_lowercase : Optional[int] = batch_size
_lowercase : List[Any] = min_seq_length
_lowercase : Union[str, Any] = max_seq_length
_lowercase : int = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
_lowercase : Union[str, Any] = padding_value
_lowercase : Dict = sampling_rate
_lowercase : Any = return_attention_mask
_lowercase : Union[str, Any] = do_normalize
_lowercase : int = feature_size
_lowercase : str = chunk_length
_lowercase : Any = hop_length
def _lowerCamelCase ( self ):
"""simple docstring"""
return {
"feature_size": self.feature_size,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def _lowerCamelCase ( self , _UpperCamelCase=False , _UpperCamelCase=False ):
"""simple docstring"""
def _flatten(_UpperCamelCase ):
return list(itertools.chain(*_UpperCamelCase ) )
if equal_length:
_lowercase : List[Any] = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
_lowercase : Optional[int] = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
_lowercase : Optional[Any] = [np.asarray(_UpperCamelCase ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class a__ ( lowerCamelCase_ , unittest.TestCase ):
_SCREAMING_SNAKE_CASE : Tuple = WhisperFeatureExtractor if is_speech_available() else None
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowercase : Union[str, Any] = WhisperFeatureExtractionTester(self )
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowercase : List[str] = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_lowercase : List[Any] = feat_extract_first.save_pretrained(_UpperCamelCase )[0]
check_json_file_has_correct_format(_UpperCamelCase )
_lowercase : Tuple = self.feature_extraction_class.from_pretrained(_UpperCamelCase )
_lowercase : List[Any] = feat_extract_first.to_dict()
_lowercase : List[str] = feat_extract_second.to_dict()
_lowercase : Tuple = feat_extract_first.mel_filters
_lowercase : List[str] = feat_extract_second.mel_filters
self.assertTrue(np.allclose(_UpperCamelCase , _UpperCamelCase ) )
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowercase : List[Any] = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_lowercase : Optional[int] = os.path.join(_UpperCamelCase , "feat_extract.json" )
feat_extract_first.to_json_file(_UpperCamelCase )
_lowercase : Any = self.feature_extraction_class.from_json_file(_UpperCamelCase )
_lowercase : List[Any] = feat_extract_first.to_dict()
_lowercase : str = feat_extract_second.to_dict()
_lowercase : List[str] = feat_extract_first.mel_filters
_lowercase : Optional[Any] = feat_extract_second.mel_filters
self.assertTrue(np.allclose(_UpperCamelCase , _UpperCamelCase ) )
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowercase : str = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
_lowercase : int = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
_lowercase : Optional[Any] = [np.asarray(_UpperCamelCase ) for speech_input in speech_inputs]
# Test feature size
_lowercase : int = feature_extractor(_UpperCamelCase , padding="max_length" , return_tensors="np" ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames )
self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size )
# Test not batched input
_lowercase : List[str] = feature_extractor(speech_inputs[0] , return_tensors="np" ).input_features
_lowercase : str = feature_extractor(np_speech_inputs[0] , return_tensors="np" ).input_features
self.assertTrue(np.allclose(_UpperCamelCase , _UpperCamelCase , atol=1E-3 ) )
# Test batched
        encoded_sequences_a = feature_extractor(_UpperCamelCase , return_tensors="np" ).input_features
        encoded_sequences_b = feature_extractor(_UpperCamelCase , return_tensors="np" ).input_features
        for enc_seq_a, enc_seq_b in zip(encoded_sequences_a , encoded_sequences_b ):
            self.assertTrue(np.allclose(enc_seq_a , enc_seq_b , atol=1E-3 ) )
# Test 2-D numpy arrays are batched.
_lowercase : Optional[int] = [floats_list((1, x) )[0] for x in (800, 800, 800)]
_lowercase : List[str] = np.asarray(_UpperCamelCase )
        encoded_sequences_a = feature_extractor(_UpperCamelCase , return_tensors="np" ).input_features
        encoded_sequences_b = feature_extractor(_UpperCamelCase , return_tensors="np" ).input_features
        for enc_seq_a, enc_seq_b in zip(encoded_sequences_a , encoded_sequences_b ):
            self.assertTrue(np.allclose(enc_seq_a , enc_seq_b , atol=1E-3 ) )
# Test truncation required
_lowercase : List[Any] = [floats_list((1, x) )[0] for x in range(200 , (feature_extractor.n_samples + 500) , 200 )]
_lowercase : List[str] = [np.asarray(_UpperCamelCase ) for speech_input in speech_inputs]
_lowercase : Any = [x[: feature_extractor.n_samples] for x in speech_inputs]
_lowercase : Any = [np.asarray(_UpperCamelCase ) for speech_input in speech_inputs_truncated]
        encoded_sequences_a = feature_extractor(_UpperCamelCase , return_tensors="np" ).input_features
        encoded_sequences_b = feature_extractor(_UpperCamelCase , return_tensors="np" ).input_features
        for enc_seq_a, enc_seq_b in zip(encoded_sequences_a , encoded_sequences_b ):
            self.assertTrue(np.allclose(enc_seq_a , enc_seq_b , atol=1E-3 ) )
def _lowerCamelCase ( self ):
"""simple docstring"""
import torch
_lowercase : Optional[int] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_lowercase : Optional[Any] = np.random.rand(100 , 32 ).astype(np.floataa )
_lowercase : Dict = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
_lowercase : Optional[int] = feature_extractor.pad([{"input_features": inputs}] , return_tensors="np" )
self.assertTrue(np_processed.input_features.dtype == np.floataa )
_lowercase : Optional[int] = feature_extractor.pad([{"input_features": inputs}] , return_tensors="pt" )
self.assertTrue(pt_processed.input_features.dtype == torch.floataa )
def _lowerCamelCase ( self , _UpperCamelCase ):
"""simple docstring"""
_lowercase : int = load_dataset("hf-internal-testing/librispeech_asr_dummy" , "clean" , split="validation" )
# automatic decoding with librispeech
_lowercase : Optional[int] = ds.sort("id" ).select(range(_UpperCamelCase ) )[:num_samples]["audio"]
return [x["array"] for x in speech_samples]
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowercase : str = torch.tensor(
[
0.1_1_9_3, -0.0_9_4_6, -0.1_0_9_8, -0.0_1_9_6, 0.0_2_2_5, -0.0_6_9_0, -0.1_7_3_6, 0.0_9_5_1,
0.0_9_7_1, -0.0_8_1_7, -0.0_7_0_2, 0.0_1_6_2, 0.0_2_6_0, 0.0_0_1_7, -0.0_1_9_2, -0.1_6_7_8,
0.0_7_0_9, -0.1_8_6_7, -0.0_6_5_5, -0.0_2_7_4, -0.0_2_3_4, -0.1_8_8_4, -0.0_5_1_6, -0.0_5_5_4,
-0.0_2_7_4, -0.1_4_2_5, -0.1_4_2_3, 0.0_8_3_7, 0.0_3_7_7, -0.0_8_5_4
] )
# fmt: on
_lowercase : str = self._load_datasamples(1 )
_lowercase : Union[str, Any] = WhisperFeatureExtractor()
_lowercase : Any = feature_extractor(_UpperCamelCase , return_tensors="pt" ).input_features
self.assertEqual(input_features.shape , (1, 80, 3000) )
self.assertTrue(torch.allclose(input_features[0, 0, :30] , _UpperCamelCase , atol=1E-4 ) )
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowercase : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_lowercase : str = self._load_datasamples(1 )[0]
_lowercase : List[str] = ((audio - audio.min()) / (audio.max() - audio.min())) * 65535 # Rescale to [0, 65535] to show issue
_lowercase : Optional[int] = feat_extract.zero_mean_unit_var_norm([audio] , attention_mask=_UpperCamelCase )[0]
self.assertTrue(np.all(np.mean(_UpperCamelCase ) < 1E-3 ) )
self.assertTrue(np.all(np.abs(np.var(_UpperCamelCase ) - 1 ) < 1E-3 ) )
| 250 | 1 |
import json
import os
import unittest
from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast
from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_spacy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __lowerCAmelCase ( lowerCAmelCase , unittest.TestCase):
_a = OpenAIGPTTokenizer
_a = OpenAIGPTTokenizerFast
_a = True
_a = False
def SCREAMING_SNAKE_CASE ( self: Any ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
lowercase :str = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"w</w>",
"r</w>",
"t</w>",
"lo",
"low",
"er</w>",
"low</w>",
"lowest</w>",
"newer</w>",
"wider</w>",
"<unk>",
]
lowercase :int = dict(zip(_lowerCAmelCase , range(len(_lowerCAmelCase ) ) ) )
lowercase :List[Any] = ["#version: 0.2", "l o", "lo w", "e r</w>", ""]
lowercase :List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
lowercase :int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" ) as fp:
fp.write(json.dumps(_lowerCAmelCase ) )
with open(self.merges_file , "w" ) as fp:
fp.write("\n".join(_lowerCAmelCase ) )
def SCREAMING_SNAKE_CASE ( self: List[str] , _lowerCAmelCase: str ):
return "lower newer", "lower newer"
def SCREAMING_SNAKE_CASE ( self: List[str] ):
lowercase :Any = OpenAIGPTTokenizer(self.vocab_file , self.merges_file )
lowercase :Optional[Any] = "lower"
lowercase :List[str] = ["low", "er</w>"]
lowercase :Tuple = tokenizer.tokenize(_lowerCAmelCase )
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
lowercase :Optional[Any] = tokens + ["<unk>"]
lowercase :List[Any] = [14, 15, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowerCAmelCase ) , _lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self: Optional[Any] , _lowerCAmelCase: Optional[int]=15 ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
lowercase :List[Any] = self.rust_tokenizer_class.from_pretrained(_lowerCAmelCase , **_lowerCAmelCase )
# Simple input
lowercase :int = "This is a simple input"
lowercase :int = ["This is a simple input 1", "This is a simple input 2"]
lowercase :List[str] = ("This is a simple input", "This is a pair")
lowercase :str = [
("This is a simple input 1", "This is a simple input 2"),
("This is a simple pair 1", "This is a simple pair 2"),
]
# Simple input tests
self.assertRaises(_lowerCAmelCase , tokenizer_r.encode , _lowerCAmelCase , max_length=_lowerCAmelCase , padding="max_length" )
# Simple input
self.assertRaises(_lowerCAmelCase , tokenizer_r.encode_plus , _lowerCAmelCase , max_length=_lowerCAmelCase , padding="max_length" )
# Simple input
self.assertRaises(
_lowerCAmelCase , tokenizer_r.batch_encode_plus , _lowerCAmelCase , max_length=_lowerCAmelCase , padding="max_length" , )
# Pair input
self.assertRaises(_lowerCAmelCase , tokenizer_r.encode , _lowerCAmelCase , max_length=_lowerCAmelCase , padding="max_length" )
# Pair input
self.assertRaises(_lowerCAmelCase , tokenizer_r.encode_plus , _lowerCAmelCase , max_length=_lowerCAmelCase , padding="max_length" )
# Pair input
self.assertRaises(
_lowerCAmelCase , tokenizer_r.batch_encode_plus , _lowerCAmelCase , max_length=_lowerCAmelCase , padding="max_length" , )
def SCREAMING_SNAKE_CASE ( self: List[Any] ):
pass
@require_ftfy
@require_spacy
@require_tokenizers
class __lowerCAmelCase ( lowerCAmelCase):
pass
| 158 |
def solution():
    total = 0
    for i in range(1, 1001 ):
        total += i**i
    return str(total )[-10:]
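# Equivalent with bounded intermediates, keeping only the last ten digits:
# sum(pow(i, i, 10**10) for i in range(1, 1001)) % 10**10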
if __name__ == "__main__":
print(solution())
| 158 | 1 |
"""simple docstring"""
import copy
import os
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
from datasets.arrow_writer import ArrowWriter, OptimizedTypedSequence, ParquetWriter, TypedSequence
from datasets.features import ArrayaD, ClassLabel, Features, Image, Value
from datasets.features.features import ArrayaDExtensionType, cast_to_python_objects
from datasets.keyhash import DuplicatedKeysError, InvalidKeyError
from .utils import require_pil
class A__ ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( self: List[str]) -> Union[str, Any]:
"""simple docstring"""
__lowerCAmelCase : Tuple = pa.array(TypedSequence([1, 2, 3]))
self.assertEqual(arr.type , pa.intaa())
def _SCREAMING_SNAKE_CASE ( self: List[str]) -> Tuple:
"""simple docstring"""
with self.assertRaises(_SCREAMING_SNAKE_CASE):
__lowerCAmelCase : int = pa.array(TypedSequence([1, 2, 3]) , type=pa.intaa())
def _SCREAMING_SNAKE_CASE ( self: List[str]) -> str:
"""simple docstring"""
with self.assertRaises(_SCREAMING_SNAKE_CASE):
__lowerCAmelCase : Optional[Any] = pa.array(TypedSequence([1, 2, 3] , try_type=Value("bool") , type=Value("int64")))
def _SCREAMING_SNAKE_CASE ( self: Optional[Any]) -> Dict:
"""simple docstring"""
__lowerCAmelCase : str = pa.array(TypedSequence([1, 2, 3] , type=Value("int32")))
self.assertEqual(arr.type , pa.intaa())
def _SCREAMING_SNAKE_CASE ( self: Optional[Any]) -> Tuple:
"""simple docstring"""
with self.assertRaises((TypeError, pa.lib.ArrowInvalid)):
__lowerCAmelCase : Optional[Any] = pa.array(TypedSequence(["foo", "bar"] , type=Value("int64")))
def _SCREAMING_SNAKE_CASE ( self: Optional[int]) -> Dict:
"""simple docstring"""
__lowerCAmelCase : Tuple = pa.array(TypedSequence([1, 2, 3] , try_type=Value("int32")))
self.assertEqual(arr.type , pa.intaa())
def _SCREAMING_SNAKE_CASE ( self: List[Any]) -> str:
"""simple docstring"""
__lowerCAmelCase : Union[str, Any] = pa.array(TypedSequence(["foo", "bar"] , try_type=Value("int64")))
self.assertEqual(arr.type , pa.string())
def _SCREAMING_SNAKE_CASE ( self: Union[str, Any]) -> Any:
"""simple docstring"""
__lowerCAmelCase : Tuple = pa.array(TypedSequence([[[1, 2, 3]]] , type=ArrayaD((1, 3) , "int64")))
self.assertEqual(arr.type , ArrayaDExtensionType((1, 3) , "int64"))
def _SCREAMING_SNAKE_CASE ( self: Optional[int]) -> Any:
"""simple docstring"""
with self.assertRaises((TypeError, pa.lib.ArrowInvalid)):
__lowerCAmelCase : int = pa.array(TypedSequence(["foo", "bar"] , type=ArrayaD((1, 3) , "int64")))
def _SCREAMING_SNAKE_CASE ( self: List[str]) -> str:
"""simple docstring"""
__lowerCAmelCase : int = pa.array(TypedSequence([[[1, 2, 3]]] , try_type=ArrayaD((1, 3) , "int64")))
self.assertEqual(arr.type , ArrayaDExtensionType((1, 3) , "int64"))
def _SCREAMING_SNAKE_CASE ( self: Tuple) -> List[Any]:
"""simple docstring"""
__lowerCAmelCase : Union[str, Any] = pa.array(TypedSequence(["foo", "bar"] , try_type=ArrayaD((1, 3) , "int64")))
self.assertEqual(arr.type , pa.string())
@require_pil
def _SCREAMING_SNAKE_CASE ( self: Tuple) -> Tuple:
"""simple docstring"""
import PIL.Image
__lowerCAmelCase : Dict = PIL.Image.fromarray(np.arange(10 , dtype=np.uinta).reshape(2 , 5))
with patch(
"datasets.arrow_writer.cast_to_python_objects" , side_effect=_SCREAMING_SNAKE_CASE) as mock_cast_to_python_objects:
__lowerCAmelCase : Union[str, Any] = pa.array(TypedSequence([{"path": None, "bytes": b"image_bytes"}, pil_image] , type=Image()))
__lowerCAmelCase , __lowerCAmelCase : Optional[int] = mock_cast_to_python_objects.call_args_list[-1]
self.assertIn("optimize_list_casting" , _SCREAMING_SNAKE_CASE)
self.assertFalse(kwargs["optimize_list_casting"])
def _lowercase ( __snake_case ,__snake_case ) -> Dict:
__lowerCAmelCase : Dict = pa.BufferReader(__snake_case ) if isinstance(__snake_case ,pa.Buffer ) else pa.memory_map(__snake_case )
__lowerCAmelCase : Union[str, Any] = pa.ipc.open_stream(__snake_case )
__lowerCAmelCase : pa.Table = f.read_all()
assert len(pa_table.to_batches() ) == expected_num_chunks
assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
del pa_table
@pytest.mark.parametrize("writer_batch_size" ,[None, 1, 10] )
@pytest.mark.parametrize(
"fields" ,[None, {"col_1": pa.string(), "col_2": pa.intaa()}, {"col_1": pa.string(), "col_2": pa.intaa()}] )
def _lowercase ( __snake_case ,__snake_case ) -> Optional[Any]:
__lowerCAmelCase : List[str] = pa.BufferOutputStream()
__lowerCAmelCase : Optional[int] = pa.schema(__snake_case ) if fields else None
with ArrowWriter(stream=__snake_case ,schema=__snake_case ,writer_batch_size=__snake_case ) as writer:
writer.write({"col_1": "foo", "col_2": 1} )
writer.write({"col_1": "bar", "col_2": 2} )
__lowerCAmelCase , __lowerCAmelCase : List[Any] = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
__lowerCAmelCase : List[Any] = {"col_1": pa.string(), "col_2": pa.intaa()}
assert writer._schema == pa.schema(__snake_case ,metadata=writer._schema.metadata )
_check_output(output.getvalue() ,expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
def _lowercase ( ) -> Optional[Any]:
__lowerCAmelCase : Any = pa.BufferOutputStream()
__lowerCAmelCase : str = Features({"labels": ClassLabel(names=["neg", "pos"] )} )
with ArrowWriter(stream=__snake_case ,features=__snake_case ) as writer:
writer.write({"labels": 0} )
writer.write({"labels": 1} )
__lowerCAmelCase , __lowerCAmelCase : Tuple = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert writer._schema == features.arrow_schema
assert writer._schema.metadata == features.arrow_schema.metadata
__lowerCAmelCase : Tuple = pa.BufferReader(output.getvalue() )
__lowerCAmelCase : List[Any] = pa.ipc.open_stream(__snake_case )
__lowerCAmelCase : pa.Table = f.read_all()
__lowerCAmelCase : Any = pa_table.schema
assert pa_table.num_rows == 2
assert schema == features.arrow_schema
assert schema.metadata == features.arrow_schema.metadata
assert features == Features.from_arrow_schema(__snake_case )
@pytest.mark.parametrize("writer_batch_size" ,[None, 1, 10] )
def _lowercase ( __snake_case ) -> int:
__lowerCAmelCase : Union[str, Any] = pa.BufferOutputStream()
with ArrowWriter(
stream=__snake_case ,writer_batch_size=__snake_case ,hash_salt="split_name" ,check_duplicates=__snake_case ,) as writer:
with pytest.raises(__snake_case ):
writer.write({"col_1": "foo", "col_2": 1} ,key=[1, 2] )
__lowerCAmelCase , __lowerCAmelCase : Optional[Any] = writer.finalize()
@pytest.mark.parametrize("writer_batch_size" ,[None, 2, 10] )
def _lowercase ( __snake_case ) -> Dict:
__lowerCAmelCase : Tuple = pa.BufferOutputStream()
with ArrowWriter(
stream=__snake_case ,writer_batch_size=__snake_case ,hash_salt="split_name" ,check_duplicates=__snake_case ,) as writer:
with pytest.raises(__snake_case ):
writer.write({"col_1": "foo", "col_2": 1} ,key=10 )
writer.write({"col_1": "bar", "col_2": 2} ,key=10 )
__lowerCAmelCase , __lowerCAmelCase : Optional[Any] = writer.finalize()
@pytest.mark.parametrize("writer_batch_size" ,[None, 2, 10] )
def _lowercase ( __snake_case ) -> Tuple:
__lowerCAmelCase : List[str] = pa.BufferOutputStream()
with ArrowWriter(
stream=__snake_case ,writer_batch_size=__snake_case ,hash_salt="split_name" ,check_duplicates=__snake_case ,) as writer:
writer.write({"col_1": "foo", "col_2": 1} ,key=1 )
writer.write({"col_1": "bar", "col_2": 2} ,key=2 )
__lowerCAmelCase , __lowerCAmelCase : Union[str, Any] = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
_check_output(output.getvalue() ,expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("writer_batch_size" ,[None, 1, 10] )
@pytest.mark.parametrize(
"fields" ,[None, {"col_1": pa.string(), "col_2": pa.intaa()}, {"col_1": pa.string(), "col_2": pa.intaa()}] )
def _lowercase ( __snake_case ,__snake_case ) -> Tuple:
__lowerCAmelCase : Dict = pa.BufferOutputStream()
__lowerCAmelCase : Union[str, Any] = pa.schema(__snake_case ) if fields else None
with ArrowWriter(stream=__snake_case ,schema=__snake_case ,writer_batch_size=__snake_case ) as writer:
writer.write_batch({"col_1": ["foo", "bar"], "col_2": [1, 2]} )
writer.write_batch({"col_1": [], "col_2": []} )
__lowerCAmelCase , __lowerCAmelCase : str = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
__lowerCAmelCase : List[str] = {"col_1": pa.string(), "col_2": pa.intaa()}
assert writer._schema == pa.schema(__snake_case ,metadata=writer._schema.metadata )
_check_output(output.getvalue() ,expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("writer_batch_size" ,[None, 1, 10] )
@pytest.mark.parametrize(
"fields" ,[None, {"col_1": pa.string(), "col_2": pa.intaa()}, {"col_1": pa.string(), "col_2": pa.intaa()}] )
def _lowercase ( __snake_case ,__snake_case ) -> List[Any]:
__lowerCAmelCase : int = pa.BufferOutputStream()
__lowerCAmelCase : Tuple = pa.schema(__snake_case ) if fields else None
with ArrowWriter(stream=__snake_case ,schema=__snake_case ,writer_batch_size=__snake_case ) as writer:
writer.write_table(pa.Table.from_pydict({"col_1": ["foo", "bar"], "col_2": [1, 2]} ) )
__lowerCAmelCase , __lowerCAmelCase : Dict = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
__lowerCAmelCase : int = {"col_1": pa.string(), "col_2": pa.intaa()}
assert writer._schema == pa.schema(__snake_case ,metadata=writer._schema.metadata )
_check_output(output.getvalue() ,expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("writer_batch_size" ,[None, 1, 10] )
@pytest.mark.parametrize(
"fields" ,[None, {"col_1": pa.string(), "col_2": pa.intaa()}, {"col_1": pa.string(), "col_2": pa.intaa()}] )
def _lowercase ( __snake_case ,__snake_case ) -> int:
__lowerCAmelCase : str = pa.BufferOutputStream()
__lowerCAmelCase : Dict = pa.schema(__snake_case ) if fields else None
with ArrowWriter(stream=__snake_case ,schema=__snake_case ,writer_batch_size=__snake_case ) as writer:
writer.write_row(pa.Table.from_pydict({"col_1": ["foo"], "col_2": [1]} ) )
writer.write_row(pa.Table.from_pydict({"col_1": ["bar"], "col_2": [2]} ) )
__lowerCAmelCase , __lowerCAmelCase : Tuple = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
__lowerCAmelCase : int = {"col_1": pa.string(), "col_2": pa.intaa()}
assert writer._schema == pa.schema(__snake_case ,metadata=writer._schema.metadata )
_check_output(output.getvalue() ,expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
def _lowercase ( ) -> Union[str, Any]:
with tempfile.TemporaryDirectory() as tmp_dir:
__lowerCAmelCase : int = {"col_1": pa.string(), "col_2": pa.intaa()}
__lowerCAmelCase : Tuple = os.path.join(__snake_case ,"test.arrow" )
with ArrowWriter(path=__snake_case ,schema=pa.schema(__snake_case ) ) as writer:
writer.write_batch({"col_1": ["foo", "bar"], "col_2": [1, 2]} )
__lowerCAmelCase , __lowerCAmelCase : Any = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert writer._schema == pa.schema(__snake_case ,metadata=writer._schema.metadata )
_check_output(__snake_case ,1 )
def _lowercase ( __snake_case ) -> Dict:
if pa.types.is_list(__snake_case ):
return get_base_dtype(arr_type.value_type )
else:
return arr_type
def _lowercase ( __snake_case ,__snake_case ) -> Tuple:
if isinstance(lst[0] ,__snake_case ):
change_first_primitive_element_in_list(lst[0] ,__snake_case )
else:
__lowerCAmelCase : Tuple = value
@pytest.mark.parametrize("optimized_int_type, expected_dtype" ,[(None, pa.intaa()), (Value("int32" ), pa.intaa())] )
@pytest.mark.parametrize("sequence" ,[[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] )
def _lowercase ( __snake_case ,__snake_case ,__snake_case ) -> Any:
__lowerCAmelCase : str = pa.array(TypedSequence(__snake_case ,optimized_int_type=__snake_case ) )
assert get_base_dtype(arr.type ) == expected_dtype
@pytest.mark.parametrize(
"col, expected_dtype" ,[
("attention_mask", pa.inta()),
("special_tokens_mask", pa.inta()),
("token_type_ids", pa.inta()),
("input_ids", pa.intaa()),
("other", pa.intaa()),
] ,)
@pytest.mark.parametrize("sequence" ,[[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] )
def _lowercase ( __snake_case ,__snake_case ,__snake_case ) -> List[Any]:
# in range
__lowerCAmelCase : Dict = pa.array(OptimizedTypedSequence(__snake_case ,col=__snake_case ) )
assert get_base_dtype(arr.type ) == expected_dtype
# not in range
if col != "other":
# avoids errors due to in-place modifications
__lowerCAmelCase : List[str] = copy.deepcopy(__snake_case )
__lowerCAmelCase : str = np.iinfo(expected_dtype.to_pandas_dtype() ).max + 1
change_first_primitive_element_in_list(__snake_case ,__snake_case )
__lowerCAmelCase : Optional[int] = pa.array(OptimizedTypedSequence(__snake_case ,col=__snake_case ) )
assert get_base_dtype(arr.type ) == pa.intaa()
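# Minimal standalone sketch of the downcast-with-fallback idea the two tests
# above exercise, in plain pyarrow (not the OptimizedTypedSequence internals):
# try the narrow integer type first, fall back to int64 when a value overflows.
def build_array_with_fallback(values, optimized_type):
    try:
        return pa.array(values, type=optimized_type)
    except (OverflowError, pa.lib.ArrowInvalid):
        return pa.array(values, type=pa.int64())
assert build_array_with_fallback([1, 2, 3], pa.int8()).type == pa.int8()
assert build_array_with_fallback([1, 2, 300], pa.int8()).type == pa.int64()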
@pytest.mark.parametrize("raise_exception" ,[False, True] )
def _lowercase ( __snake_case ,__snake_case ) -> Union[str, Any]:
__lowerCAmelCase : str = str(tmp_path / "dataset-train.arrow" )
try:
with ArrowWriter(path=__snake_case ) as writer:
if raise_exception:
raise pa.lib.ArrowInvalid()
else:
writer.stream.close()
except pa.lib.ArrowInvalid:
pass
finally:
assert writer.stream.closed
def _lowercase ( __snake_case ) -> Optional[int]:
__lowerCAmelCase : List[Any] = "mock://dataset-train.arrow"
with ArrowWriter(path=__snake_case ,storage_options=mockfs.storage_options ) as writer:
assert isinstance(writer._fs ,type(__snake_case ) )
assert writer._fs.storage_options == mockfs.storage_options
writer.write({"col_1": "foo", "col_2": 1} )
writer.write({"col_1": "bar", "col_2": 2} )
__lowerCAmelCase , __lowerCAmelCase : int = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert mockfs.exists(__snake_case )
def _lowercase ( ) -> str:
__lowerCAmelCase : Tuple = pa.BufferOutputStream()
with ParquetWriter(stream=__snake_case ) as writer:
writer.write({"col_1": "foo", "col_2": 1} )
writer.write({"col_1": "bar", "col_2": 2} )
__lowerCAmelCase , __lowerCAmelCase : Union[str, Any] = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
__lowerCAmelCase : str = pa.BufferReader(output.getvalue() )
__lowerCAmelCase : pa.Table = pq.read_table(__snake_case )
assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
@require_pil
@pytest.mark.parametrize("embed_local_files" ,[False, True] )
def _lowercase ( __snake_case ,__snake_case ) -> Dict:
import PIL.Image
__lowerCAmelCase : List[Any] = str(tmp_path / "test_image_rgb.jpg" )
PIL.Image.fromarray(np.zeros((5, 5) ,dtype=np.uinta ) ).save(__snake_case ,format="png" )
__lowerCAmelCase : Any = pa.BufferOutputStream()
with ParquetWriter(
stream=__snake_case ,features=Features({"image": Image()} ) ,embed_local_files=__snake_case ) as writer:
writer.write({"image": image_path} )
writer.finalize()
__lowerCAmelCase : int = pa.BufferReader(output.getvalue() )
__lowerCAmelCase : pa.Table = pq.read_table(__snake_case )
__lowerCAmelCase : List[Any] = pa_table.to_pydict()
if embed_local_files:
assert isinstance(out["image"][0]["path"] ,__snake_case )
with open(__snake_case ,"rb" ) as f:
assert out["image"][0]["bytes"] == f.read()
else:
assert out["image"][0]["path"] == image_path
assert out["image"][0]["bytes"] is None
def _lowercase ( ) -> Optional[int]:
__lowerCAmelCase : Dict = pa.schema([pa.field("col_1" ,pa.string() ,nullable=__snake_case )] )
__lowerCAmelCase : List[str] = pa.BufferOutputStream()
with ArrowWriter(stream=__snake_case ) as writer:
writer._build_writer(inferred_schema=__snake_case )
assert writer._schema == pa.schema([pa.field("col_1" ,pa.string() )] )
| 269 |
"""simple docstring"""
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('TEST_SAGEMAKER' , 'False' ) ) is not True , reason='Skipping test because should only be run when releasing minor transformers version' , )
@pytest.mark.usefixtures('sm_env' )
@parameterized_class(
[
{
'framework': 'pytorch',
'script': 'run_glue.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.g4dn.xlarge',
'results': {'train_runtime': 6_5_0, 'eval_accuracy': 0.6, 'eval_loss': 0.9},
},
{
'framework': 'tensorflow',
'script': 'run_tf.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.g4dn.xlarge',
'results': {'train_runtime': 6_0_0, 'eval_accuracy': 0.3, 'eval_loss': 0.9},
},
] )
class A__ ( unittest.TestCase ):
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( self: int) -> Tuple:
"""simple docstring"""
if self.framework == "pytorch":
subprocess.run(
F"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding="utf-8" , check=_SCREAMING_SNAKE_CASE , )
assert hasattr(self , "env")
def _SCREAMING_SNAKE_CASE ( self: Optional[Any] , _SCREAMING_SNAKE_CASE: Any=1) -> Dict:
"""simple docstring"""
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=F"""{self.env.base_job_name}-single""" , instance_count=_SCREAMING_SNAKE_CASE , instance_type=self.instance_type , debugger_hook_config=_SCREAMING_SNAKE_CASE , hyperparameters={**self.env.hyperparameters, "model_name_or_path": self.model_name_or_path} , metric_definitions=self.env.metric_definitions , py_version="py36" , )
def _SCREAMING_SNAKE_CASE ( self: str , _SCREAMING_SNAKE_CASE: List[Any]) -> Optional[Any]:
"""simple docstring"""
TrainingJobAnalytics(_SCREAMING_SNAKE_CASE).export_csv(F"""{self.env.test_path}/{job_name}_metrics.csv""")
def _SCREAMING_SNAKE_CASE ( self: str) -> Optional[Any]:
"""simple docstring"""
__lowerCAmelCase : Tuple = self.create_estimator()
# run training
estimator.fit()
# result dataframe
__lowerCAmelCase : Tuple = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()
# extract kpis
__lowerCAmelCase : Optional[Any] = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
__lowerCAmelCase : Union[str, Any] = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
# get train time from the SageMaker job; this includes starting, preprocessing, and stopping
__lowerCAmelCase : Tuple = (
Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds" , 99_9999)
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
assert all(t <= self.results["eval_loss"] for t in eval_loss)
# dump tests result into json file to share in PR
with open(F"""{estimator.latest_training_job.name}.json""" , "w") as outfile:
json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss} , _SCREAMING_SNAKE_CASE) | 269 | 1 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_deformable_detr import DeformableDetrImageProcessor
lowercase__ = logging.get_logger(__name__)
class A_ ( _snake_case ):
'''simple docstring'''
def __init__( self : Optional[int] , *lowercase_ : Optional[int] , **lowercase_ : List[Any] ) -> None:
warnings.warn(
'The class DeformableDetrFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
' Please use DeformableDetrImageProcessor instead.' , lowercase_ , )
super().__init__(*lowercase_ , **lowercase_ )
| 280 |
'''simple docstring'''
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.text import TextDatasetReader
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ ):
assert isinstance(UpperCAmelCase_ , UpperCAmelCase_ )
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('keep_in_memory' , [False, True] )
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ):
UpperCAmelCase : int = tmp_path / 'cache'
UpperCAmelCase : List[str] = {'text': 'string'}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
UpperCAmelCase : Tuple = TextDatasetReader(UpperCAmelCase_ , cache_dir=UpperCAmelCase_ , keep_in_memory=UpperCAmelCase_ ).read()
_check_text_dataset(UpperCAmelCase_ , UpperCAmelCase_ )
@pytest.mark.parametrize(
'features' , [
None,
{'text': 'string'},
{'text': 'int32'},
{'text': 'float32'},
] , )
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ):
UpperCAmelCase : Optional[int] = tmp_path / 'cache'
UpperCAmelCase : List[str] = {'text': 'string'}
UpperCAmelCase : Optional[int] = features.copy() if features else default_expected_features
UpperCAmelCase : int = (
Features({feature: Value(UpperCAmelCase_ ) for feature, dtype in features.items()} ) if features is not None else None
)
UpperCAmelCase : Union[str, Any] = TextDatasetReader(UpperCAmelCase_ , features=UpperCAmelCase_ , cache_dir=UpperCAmelCase_ ).read()
_check_text_dataset(UpperCAmelCase_ , UpperCAmelCase_ )
@pytest.mark.parametrize('split' , [None, NamedSplit('train' ), 'train', 'test'] )
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ):
UpperCAmelCase : Optional[int] = tmp_path / 'cache'
UpperCAmelCase : Tuple = {'text': 'string'}
UpperCAmelCase : List[str] = TextDatasetReader(UpperCAmelCase_ , cache_dir=UpperCAmelCase_ , split=UpperCAmelCase_ ).read()
_check_text_dataset(UpperCAmelCase_ , UpperCAmelCase_ )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize('path_type' , [str, list] )
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ):
if issubclass(UpperCAmelCase_ , UpperCAmelCase_ ):
UpperCAmelCase : Tuple = text_path
elif issubclass(UpperCAmelCase_ , UpperCAmelCase_ ):
UpperCAmelCase : Optional[Any] = [text_path]
UpperCAmelCase : List[Any] = tmp_path / 'cache'
UpperCAmelCase : Union[str, Any] = {'text': 'string'}
UpperCAmelCase : List[Any] = TextDatasetReader(UpperCAmelCase_ , cache_dir=UpperCAmelCase_ ).read()
_check_text_dataset(UpperCAmelCase_ , UpperCAmelCase_ )
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_=("train",) ):
assert isinstance(UpperCAmelCase_ , UpperCAmelCase_ )
for split in splits:
UpperCAmelCase : Union[str, Any] = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('keep_in_memory' , [False, True] )
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ):
UpperCAmelCase : Any = tmp_path / 'cache'
UpperCAmelCase : List[str] = {'text': 'string'}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
UpperCAmelCase : int = TextDatasetReader({'train': text_path} , cache_dir=UpperCAmelCase_ , keep_in_memory=UpperCAmelCase_ ).read()
_check_text_datasetdict(UpperCAmelCase_ , UpperCAmelCase_ )
@pytest.mark.parametrize(
'features' , [
None,
{'text': 'string'},
{'text': 'int32'},
{'text': 'float32'},
] , )
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ):
UpperCAmelCase : Union[str, Any] = tmp_path / 'cache'
# the text loader always produces a single string "text" column by default
UpperCAmelCase : Tuple = {'text': 'string'}
UpperCAmelCase : Union[str, Any] = features.copy() if features else default_expected_features
UpperCAmelCase : int = (
Features({feature: Value(UpperCAmelCase_ ) for feature, dtype in features.items()} ) if features is not None else None
)
UpperCAmelCase : List[Any] = TextDatasetReader({'train': text_path} , features=UpperCAmelCase_ , cache_dir=UpperCAmelCase_ ).read()
_check_text_datasetdict(UpperCAmelCase_ , UpperCAmelCase_ )
@pytest.mark.parametrize('split' , [None, NamedSplit('train' ), 'train', 'test'] )
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ):
if split:
UpperCAmelCase : int = {split: text_path}
else:
UpperCAmelCase : int = 'train'
UpperCAmelCase : Any = {'train': text_path, 'test': text_path}
UpperCAmelCase : Dict = tmp_path / 'cache'
UpperCAmelCase : Any = {'text': 'string'}
UpperCAmelCase : List[str] = TextDatasetReader(UpperCAmelCase_ , cache_dir=UpperCAmelCase_ ).read()
_check_text_datasetdict(UpperCAmelCase_ , UpperCAmelCase_ , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
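# Public-API equivalent of what these tests exercise (the file path below is a
# hypothetical placeholder): each line of the file becomes one "text" example.
from datasets import load_dataset
text_ds = load_dataset("text", data_files={"train": "corpus.txt"})  # hypothetical path
assert text_ds["train"].column_names == ["text"]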
| 280 | 1 |
"""simple docstring"""
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def snake_case ( self ):
__lowerCAmelCase = 0
def snake_case ( self ):
__lowerCAmelCase = AutoImageProcessor.from_pretrained("openai/clip-vit-base-patch32" )
self.assertIsInstance(__a , __a )
def snake_case ( self ):
with tempfile.TemporaryDirectory() as tmpdirname:
__lowerCAmelCase = Path(__a ) / "preprocessor_config.json"
__lowerCAmelCase = Path(__a ) / "config.json"
json.dump(
{"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"} , open(__a , "w" ) , )
json.dump({"model_type": "clip"} , open(__a , "w" ) )
__lowerCAmelCase = AutoImageProcessor.from_pretrained(__a )
self.assertIsInstance(__a , __a )
def snake_case ( self ):
# Ensure we can load the image processor from the feature extractor config
with tempfile.TemporaryDirectory() as tmpdirname:
__lowerCAmelCase = Path(__a ) / "preprocessor_config.json"
__lowerCAmelCase = Path(__a ) / "config.json"
json.dump(
{"feature_extractor_type": "CLIPFeatureExtractor", "processor_class": "CLIPProcessor"} , open(__a , "w" ) , )
json.dump({"model_type": "clip"} , open(__a , "w" ) )
__lowerCAmelCase = AutoImageProcessor.from_pretrained(__a )
self.assertIsInstance(__a , __a )
def snake_case ( self ):
with tempfile.TemporaryDirectory() as tmpdirname:
__lowerCAmelCase = CLIPConfig()
# Create a dummy config file with image_processor_type
__lowerCAmelCase = Path(__a ) / "preprocessor_config.json"
__lowerCAmelCase = Path(__a ) / "config.json"
json.dump(
{"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"} , open(__a , "w" ) , )
json.dump({"model_type": "clip"} , open(__a , "w" ) )
# remove image_processor_type to make sure config.json alone is enough to load image processor locally
__lowerCAmelCase = AutoImageProcessor.from_pretrained(__a ).to_dict()
config_dict.pop("image_processor_type" )
__lowerCAmelCase = CLIPImageProcessor(**__a )
# save in new folder
model_config.save_pretrained(__a )
config.save_pretrained(__a )
__lowerCAmelCase = AutoImageProcessor.from_pretrained(__a )
# make sure private variable is not incorrectly saved
__lowerCAmelCase = json.loads(config.to_json_string() )
self.assertTrue("_processor_class" not in dict_as_saved )
self.assertIsInstance(__a , __a )
def snake_case ( self ):
with tempfile.TemporaryDirectory() as tmpdirname:
__lowerCAmelCase = Path(__a ) / "preprocessor_config.json"
json.dump(
{"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"} , open(__a , "w" ) , )
__lowerCAmelCase = AutoImageProcessor.from_pretrained(__a )
self.assertIsInstance(__a , __a )
def snake_case ( self ):
with self.assertRaisesRegex(
__a , "clip-base is not a local folder and is not a valid model identifier" ):
__lowerCAmelCase = AutoImageProcessor.from_pretrained("clip-base" )
def snake_case ( self ):
with self.assertRaisesRegex(
__a , R"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ):
__lowerCAmelCase = AutoImageProcessor.from_pretrained(__a , revision="aaaaaa" )
def snake_case ( self ):
with self.assertRaisesRegex(
__a , "hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json." , ):
__lowerCAmelCase = AutoImageProcessor.from_pretrained("hf-internal-testing/config-no-model" )
def snake_case ( self ):
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(__a ):
__lowerCAmelCase = AutoImageProcessor.from_pretrained("hf-internal-testing/test_dynamic_image_processor" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(__a ):
__lowerCAmelCase = AutoImageProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_image_processor" , trust_remote_code=__a )
__lowerCAmelCase = AutoImageProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_image_processor" , trust_remote_code=__a )
self.assertEqual(image_processor.__class__.__name__ , "NewImageProcessor" )
# Test image processor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(__a )
__lowerCAmelCase = AutoImageProcessor.from_pretrained(__a , trust_remote_code=__a )
self.assertEqual(reloaded_image_processor.__class__.__name__ , "NewImageProcessor" )
def snake_case ( self ):
try:
AutoConfig.register("custom" , __a )
AutoImageProcessor.register(__a , __a )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__a ):
AutoImageProcessor.register(__a , __a )
with tempfile.TemporaryDirectory() as tmpdirname:
__lowerCAmelCase = Path(__a ) / "preprocessor_config.json"
__lowerCAmelCase = Path(__a ) / "config.json"
json.dump(
{"feature_extractor_type": "CLIPFeatureExtractor", "processor_class": "CLIPProcessor"} , open(__a , "w" ) , )
json.dump({"model_type": "clip"} , open(__a , "w" ) )
__lowerCAmelCase = CustomImageProcessor.from_pretrained(__a )
# Now that the config is registered, it can be used as any other config with the auto-API
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(__a )
__lowerCAmelCase = AutoImageProcessor.from_pretrained(__a )
self.assertIsInstance(__a , __a )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
def snake_case ( self ):
class _UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
__UpperCAmelCase : Dict =True
try:
AutoConfig.register("custom" , __a )
AutoImageProcessor.register(__a , __a )
# If remote code is not set, the default is to use local
__lowerCAmelCase = AutoImageProcessor.from_pretrained("hf-internal-testing/test_dynamic_image_processor" )
self.assertEqual(image_processor.__class__.__name__ , "NewImageProcessor" )
self.assertTrue(image_processor.is_local )
# If remote code is disabled, we load the local one.
__lowerCAmelCase = AutoImageProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_image_processor" , trust_remote_code=__a )
self.assertEqual(image_processor.__class__.__name__ , "NewImageProcessor" )
self.assertTrue(image_processor.is_local )
# If remote is enabled, we load from the Hub
__lowerCAmelCase = AutoImageProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_image_processor" , trust_remote_code=__a )
self.assertEqual(image_processor.__class__.__name__ , "NewImageProcessor" )
self.assertTrue(not hasattr(__a , "is_local" ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
| 57 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
A : Optional[int] = {
"configuration_altclip": [
"ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"AltCLIPConfig",
"AltCLIPTextConfig",
"AltCLIPVisionConfig",
],
"processing_altclip": ["AltCLIPProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : Tuple = [
"ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"AltCLIPPreTrainedModel",
"AltCLIPModel",
"AltCLIPTextModel",
"AltCLIPVisionModel",
]
if TYPE_CHECKING:
from .configuration_altclip import (
ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
AltCLIPConfig,
AltCLIPTextConfig,
AltCLIPVisionConfig,
)
from .processing_altclip import AltCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_altclip import (
ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
AltCLIPModel,
AltCLIPPreTrainedModel,
AltCLIPTextModel,
AltCLIPVisionModel,
)
else:
import sys
A : int = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
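# Generic sketch of the lazy-import mechanism such structures rely on: PEP 562
# module-level __getattr__ (illustrative only, not the _LazyModule implementation).
import importlib
_LAZY_ATTRS = {"AltCLIPProcessor": ".processing_altclip"}  # attribute -> submodule
def __getattr__(name):
    if name in _LAZY_ATTRS:
        submodule = importlib.import_module(_LAZY_ATTRS[name], __name__)
        return getattr(submodule, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")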
| 57 | 1 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import PoolFormerImageProcessor
class lowercase ( unittest.TestCase):
"""simple docstring"""
def __init__( self : List[Any] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : int=7 , __UpperCAmelCase : Dict=3 , __UpperCAmelCase : Optional[Any]=30 , __UpperCAmelCase : Union[str, Any]=400 , __UpperCAmelCase : Union[str, Any]=True , __UpperCAmelCase : Optional[Any]=None , __UpperCAmelCase : str=0.9 , __UpperCAmelCase : List[Any]=None , __UpperCAmelCase : Optional[int]=True , __UpperCAmelCase : Optional[Any]=[0.5, 0.5, 0.5] , __UpperCAmelCase : Optional[Any]=[0.5, 0.5, 0.5] , ) -> Optional[Any]:
UpperCAmelCase_= size if size is not None else {"""shortest_edge""": 30}
UpperCAmelCase_= crop_size if crop_size is not None else {"""height""": 30, """width""": 30}
UpperCAmelCase_= parent
UpperCAmelCase_= batch_size
UpperCAmelCase_= num_channels
UpperCAmelCase_= min_resolution
UpperCAmelCase_= max_resolution
UpperCAmelCase_= do_resize_and_center_crop
UpperCAmelCase_= size
UpperCAmelCase_= crop_pct
UpperCAmelCase_= crop_size
UpperCAmelCase_= do_normalize
UpperCAmelCase_= image_mean
UpperCAmelCase_= image_std
def _SCREAMING_SNAKE_CASE ( self : str ) -> List[str]:
return {
"size": self.size,
"do_resize_and_center_crop": self.do_resize_and_center_crop,
"crop_pct": self.crop_pct,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class lowercase ( snake_case__ , unittest.TestCase):
"""simple docstring"""
a__ : str = PoolFormerImageProcessor if is_vision_available() else None
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Any:
UpperCAmelCase_= PoolFormerImageProcessingTester(self )
@property
def _SCREAMING_SNAKE_CASE ( self : int ) -> Any:
return self.image_processor_tester.prepare_image_processor_dict()
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Optional[int]:
UpperCAmelCase_= self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__UpperCAmelCase , """do_resize_and_center_crop""" ) )
self.assertTrue(hasattr(__UpperCAmelCase , """size""" ) )
self.assertTrue(hasattr(__UpperCAmelCase , """crop_pct""" ) )
self.assertTrue(hasattr(__UpperCAmelCase , """do_normalize""" ) )
self.assertTrue(hasattr(__UpperCAmelCase , """image_mean""" ) )
self.assertTrue(hasattr(__UpperCAmelCase , """image_std""" ) )
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Tuple:
UpperCAmelCase_= self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""shortest_edge""": 30} )
self.assertEqual(image_processor.crop_size , {"""height""": 30, """width""": 30} )
UpperCAmelCase_= self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {"""shortest_edge""": 42} )
self.assertEqual(image_processor.crop_size , {"""height""": 84, """width""": 84} )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Optional[int]:
pass
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[Any]:
# Initialize image_processing
UpperCAmelCase_= self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCAmelCase_= prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCAmelCase , Image.Image )
# Test not batched input
UpperCAmelCase_= image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
UpperCAmelCase_= image_processing(__UpperCAmelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def _SCREAMING_SNAKE_CASE ( self : str ) -> List[str]:
# Initialize image_processing
UpperCAmelCase_= self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCAmelCase_= prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCAmelCase , numpify=__UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCAmelCase , np.ndarray )
# Test not batched input
UpperCAmelCase_= image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
UpperCAmelCase_= image_processing(__UpperCAmelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Dict:
# Initialize image_processing
UpperCAmelCase_= self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCAmelCase_= prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCAmelCase , torchify=__UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCAmelCase , torch.Tensor )
# Test not batched input
UpperCAmelCase_= image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
UpperCAmelCase_= image_processing(__UpperCAmelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
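# Sketch of the crop_pct arithmetic this processor family uses (assuming the
# timm convention): resize the shortest edge to size / crop_pct, then take a
# centered crop of the target size.
crop_size, crop_pct = 30, 0.9
resize_shortest_edge = int(crop_size / crop_pct)  # 33 -> resize first...
center_crop_size = (crop_size, crop_size)         # ...then center-crop to 30x30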
| 365 |
__A = 6_5521
def __a ( lowerCAmelCase_ : str ) -> int:
'''simple docstring'''
UpperCAmelCase_= 1
UpperCAmelCase_= 0
for plain_chr in plain_text:
UpperCAmelCase_= (a + ord(lowerCAmelCase_ )) % MOD_ADLER
UpperCAmelCase_= (b + a) % MOD_ADLER
return (b << 16) | a
| 277 | 0 |
"""simple docstring"""
import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen, xsplitext
from ..table import array_cast
from ..utils.py_utils import no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
from .features import FeatureType
__A ,__A ,__A = False, False, False
@dataclass
class UpperCAmelCase :
"""simple docstring"""
_UpperCAmelCase :Optional[int] = None
_UpperCAmelCase :bool = True
_UpperCAmelCase :bool = True
_UpperCAmelCase :Optional[str] = None
# Automatically constructed
_UpperCAmelCase :ClassVar[str] = "dict"
_UpperCAmelCase :ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()} )
_UpperCAmelCase :str = field(default="Audio" ,init=_UpperCAmelCase ,repr=_UpperCAmelCase )
def __call__( self ):
return self.pa_type
def _snake_case ( self , _UpperCAmelCase ):
try:
import soundfile as sf # soundfile is a dependency of librosa, needed to decode audio files.
except ImportError as err:
raise ImportError('''To support encoding audio data, please install \'soundfile\'.''' ) from err
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
return {"bytes": None, "path": value}
elif isinstance(_UpperCAmelCase , _UpperCAmelCase ):
return {"bytes": value, "path": None}
elif "array" in value:
# convert the audio array to wav bytes
lowercase__: Optional[Any] = BytesIO()
sf.write(_UpperCAmelCase , value['''array'''] , value['''sampling_rate'''] , format='''wav''' )
return {"bytes": buffer.getvalue(), "path": None}
elif value.get('''path''' ) is not None and os.path.isfile(value['''path'''] ):
# we set "bytes": None to not duplicate the data if they're already available locally
if value["path"].endswith('''pcm''' ):
# "PCM" only has raw audio bytes
if value.get('''sampling_rate''' ) is None:
# Converting raw PCM bytes to WAV bytes requires knowing the sampling rate
raise KeyError('''To use PCM files, please specify a \'sampling_rate\' in Audio object''' )
if value.get('''bytes''' ):
# If raw PCM bytes are already available, use them directly instead of re-reading the file
lowercase__: List[str] = np.frombuffer(value['''bytes'''] , dtype=np.intaa ).astype(np.floataa ) / 32767
else:
lowercase__: int = np.memmap(value['''path'''] , dtype='''h''' , mode='''r''' ).astype(np.floataa ) / 32767
lowercase__: List[Any] = BytesIO(bytes() )
sf.write(_UpperCAmelCase , _UpperCAmelCase , value['''sampling_rate'''] , format='''wav''' )
return {"bytes": buffer.getvalue(), "path": None}
else:
return {"bytes": None, "path": value.get('''path''' )}
elif value.get('''bytes''' ) is not None or value.get('''path''' ) is not None:
# store the audio bytes, and path is used to infer the audio format using the file extension
return {"bytes": value.get('''bytes''' ), "path": value.get('''path''' )}
else:
raise ValueError(
F"""An audio sample should have one of 'path' or 'bytes' but they are missing or None in {value}.""" )
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase = None ):
if not self.decode:
raise RuntimeError('''Decoding is disabled for this feature. Please use Audio(decode=True) instead.''' )
lowercase__, lowercase__: Union[str, Any] = (value['''path'''], BytesIO(value['''bytes'''] )) if value['''bytes'''] is not None else (value['''path'''], None)
if path is None and file is None:
raise ValueError(F"""An audio sample should have one of 'path' or 'bytes' but both are None in {value}.""" )
try:
import librosa
import soundfile as sf
except ImportError as err:
raise ImportError('''To support decoding audio files, please install \'librosa\' and \'soundfile\'.''' ) from err
lowercase__: str = xsplitext(_UpperCAmelCase )[1][1:].lower() if path is not None else None
if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
raise RuntimeError(
'''Decoding \'opus\' files requires system library \'libsndfile\'>=1.0.31, '''
'''You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. ''' )
elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
raise RuntimeError(
'''Decoding \'mp3\' files requires system library \'libsndfile\'>=1.1.0, '''
'''You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. ''' )
if file is None:
lowercase__: Dict = token_per_repo_id or {}
lowercase__: List[Any] = path.split('''::''' )[-1]
try:
lowercase__: Any = string_to_dict(_UpperCAmelCase , config.HUB_DATASETS_URL )['''repo_id''']
lowercase__: Any = token_per_repo_id[repo_id]
except (ValueError, KeyError):
lowercase__: List[str] = None
with xopen(_UpperCAmelCase , '''rb''' , use_auth_token=_UpperCAmelCase ) as f:
lowercase__, lowercase__: List[Any] = sf.read(_UpperCAmelCase )
else:
lowercase__, lowercase__: Tuple = sf.read(_UpperCAmelCase )
lowercase__: Tuple = array.T
if self.mono:
lowercase__: Dict = librosa.to_mono(_UpperCAmelCase )
if self.sampling_rate and self.sampling_rate != sampling_rate:
lowercase__: Optional[Any] = librosa.resample(_UpperCAmelCase , orig_sr=_UpperCAmelCase , target_sr=self.sampling_rate )
lowercase__: Tuple = self.sampling_rate
return {"path": path, "array": array, "sampling_rate": sampling_rate}
def _snake_case ( self ):
from .features import Value
if self.decode:
raise ValueError('''Cannot flatten a decoded Audio feature.''' )
return {
"bytes": Value('''binary''' ),
"path": Value('''string''' ),
}
def _snake_case ( self , _UpperCAmelCase ):
if pa.types.is_string(storage.type ):
lowercase__: Tuple = pa.array([None] * len(_UpperCAmelCase ) , type=pa.binary() )
lowercase__: Dict = pa.StructArray.from_arrays([bytes_array, storage] , ['''bytes''', '''path'''] , mask=storage.is_null() )
elif pa.types.is_binary(storage.type ):
lowercase__: Tuple = pa.array([None] * len(_UpperCAmelCase ) , type=pa.string() )
lowercase__: Union[str, Any] = pa.StructArray.from_arrays([storage, path_array] , ['''bytes''', '''path'''] , mask=storage.is_null() )
elif pa.types.is_struct(storage.type ) and storage.type.get_all_field_indices('''array''' ):
lowercase__: str = pa.array([Audio().encode_example(_UpperCAmelCase ) if x is not None else None for x in storage.to_pylist()] )
elif pa.types.is_struct(storage.type ):
if storage.type.get_field_index('''bytes''' ) >= 0:
lowercase__: Dict = storage.field('''bytes''' )
else:
lowercase__: List[str] = pa.array([None] * len(_UpperCAmelCase ) , type=pa.binary() )
if storage.type.get_field_index('''path''' ) >= 0:
lowercase__: Optional[Any] = storage.field('''path''' )
else:
lowercase__: Union[str, Any] = pa.array([None] * len(_UpperCAmelCase ) , type=pa.string() )
lowercase__: Optional[Any] = pa.StructArray.from_arrays([bytes_array, path_array] , ['''bytes''', '''path'''] , mask=storage.is_null() )
return array_cast(_UpperCAmelCase , self.pa_type )
def _snake_case ( self , _UpperCAmelCase ):
@no_op_if_value_is_null
def path_to_bytes(_UpperCAmelCase ):
with xopen(_UpperCAmelCase , '''rb''' ) as f:
lowercase__: Tuple = f.read()
return bytes_
lowercase__: List[Any] = pa.array(
[
(path_to_bytes(x['''path'''] ) if x['''bytes'''] is None else x['''bytes''']) if x is not None else None
for x in storage.to_pylist()
] , type=pa.binary() , )
lowercase__: List[Any] = pa.array(
[os.path.basename(_UpperCAmelCase ) if path is not None else None for path in storage.field('''path''' ).to_pylist()] , type=pa.string() , )
lowercase__: Optional[int] = pa.StructArray.from_arrays([bytes_array, path_array] , ['''bytes''', '''path'''] , mask=bytes_array.is_null() )
return array_cast(_UpperCAmelCase , self.pa_type )
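# Minimal sketch of the int16 PCM -> float32 normalization used in the
# encode/decode paths above (values are illustrative):
pcm_bytes = np.array([0, 16384, -32768], dtype=np.int16).tobytes()
samples = np.frombuffer(pcm_bytes, dtype=np.int16).astype(np.float32) / 32767
# samples now lies (approximately) in [-1.0, 1.0], ready to write as WAV floats.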
| 177 |
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UpperCAmelCase :
"""simple docstring"""
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=3 , _UpperCAmelCase=32 , _UpperCAmelCase=3 , _UpperCAmelCase=10 , _UpperCAmelCase=[10, 20, 30, 40] , _UpperCAmelCase=[1, 1, 2, 1] , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase="relu" , _UpperCAmelCase=3 , _UpperCAmelCase=None , ):
lowercase__: Optional[Any] = parent
lowercase__: Union[str, Any] = batch_size
lowercase__: int = image_size
lowercase__: Optional[Any] = num_channels
lowercase__: Optional[int] = embeddings_size
lowercase__: Dict = hidden_sizes
lowercase__: Union[str, Any] = depths
lowercase__: str = is_training
lowercase__: Optional[int] = use_labels
lowercase__: List[str] = hidden_act
lowercase__: Dict = num_labels
lowercase__: Any = scope
lowercase__: Optional[Any] = len(_UpperCAmelCase )
def _snake_case ( self ):
lowercase__: Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase__: List[Any] = None
if self.use_labels:
lowercase__: Any = ids_tensor([self.batch_size] , self.num_labels )
lowercase__: Optional[int] = self.get_config()
return config, pixel_values, labels
def _snake_case ( self ):
return ResNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
lowercase__: Optional[Any] = TFResNetModel(config=_UpperCAmelCase )
lowercase__: Dict = model(_UpperCAmelCase )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
lowercase__: str = self.num_labels
lowercase__: int = TFResNetForImageClassification(_UpperCAmelCase )
lowercase__: Optional[Any] = model(_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _snake_case ( self ):
lowercase__: int = self.prepare_config_and_inputs()
lowercase__, lowercase__, lowercase__: Optional[Any] = config_and_inputs
lowercase__: Union[str, Any] = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_tf
class UpperCAmelCase (_UpperCAmelCase ,_UpperCAmelCase ,unittest.TestCase ):
"""simple docstring"""
_UpperCAmelCase :Any = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
_UpperCAmelCase :List[str] = (
{"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification}
if is_tf_available()
else {}
)
_UpperCAmelCase :Any = False
_UpperCAmelCase :List[str] = False
_UpperCAmelCase :Optional[Any] = False
_UpperCAmelCase :Tuple = False
_UpperCAmelCase :List[Any] = False
def _snake_case ( self ):
lowercase__: Union[str, Any] = TFResNetModelTester(self )
lowercase__: Optional[int] = ConfigTester(self , config_class=_UpperCAmelCase , has_text_modality=_UpperCAmelCase )
def _snake_case ( self ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _snake_case ( self ):
return
@unittest.skip(reason='''ResNet does not use inputs_embeds''' )
def _snake_case ( self ):
pass
@unittest.skip(reason='''ResNet does not support input and output embeddings''' )
def _snake_case ( self ):
pass
def _snake_case ( self ):
lowercase__, lowercase__: int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__: Optional[Any] = model_class(_UpperCAmelCase )
lowercase__: str = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__: int = [*signature.parameters.keys()]
lowercase__: Optional[Any] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _UpperCAmelCase )
def _snake_case ( self ):
lowercase__: Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase )
def _snake_case ( self ):
def check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
lowercase__: Union[str, Any] = model_class(_UpperCAmelCase )
lowercase__: List[Any] = model(**self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) )
lowercase__: Union[str, Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
lowercase__: Tuple = self.model_tester.num_stages
self.assertEqual(len(_UpperCAmelCase ) , expected_num_stages + 1 )
# ResNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
lowercase__, lowercase__: Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__: Union[str, Any] = ['''basic''', '''bottleneck''']
for model_class in self.all_model_classes:
for layer_type in layers_type:
lowercase__: Tuple = layer_type
lowercase__: Any = True
check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase__: List[str] = True
check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
def _snake_case ( self ):
lowercase__: Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_UpperCAmelCase )
@slow
def _snake_case ( self ):
for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__: Dict = TFResNetModel.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( ) -> List[Any]:
lowercase__: List[Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_tf
@require_vision
class UpperCAmelCase (unittest.TestCase ):
"""simple docstring"""
@cached_property
def _snake_case ( self ):
return (
AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def _snake_case ( self ):
lowercase__: Tuple = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
lowercase__: Any = self.default_image_processor
lowercase__: List[Any] = prepare_img()
lowercase__: List[Any] = image_processor(images=_UpperCAmelCase , return_tensors='''tf''' )
# forward pass
lowercase__: Dict = model(**_UpperCAmelCase )
# verify the logits
lowercase__: int = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , _UpperCAmelCase )
lowercase__: Dict = tf.constant([-11.1_069, -9.7_877, -8.3_777] )
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , _UpperCAmelCase , atol=1e-4 ) )
| 177 | 1 |
from typing import List, Optional, Tuple, Union
import torch
from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__) # pylint: disable=invalid-name
class __lowerCamelCase ( snake_case_ ):
"""simple docstring"""
def __init__( self , UpperCAmelCase , UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
super().__init__()
self.register_modules(unet=UpperCAmelCase , scheduler=UpperCAmelCase )
@torch.no_grad()
def __call__( self , UpperCAmelCase = 1 , UpperCAmelCase = 100 , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = True , ) -> Union[AudioPipelineOutput, Tuple]:
'''simple docstring'''
if audio_length_in_s is None:
lowercase_ = self.unet.config.sample_size / self.unet.config.sample_rate
lowercase_ = audio_length_in_s * self.unet.config.sample_rate
lowercase_ = 2 ** len(self.unet.up_blocks )
if sample_size < 3 * down_scale_factor:
raise ValueError(
F'{audio_length_in_s} is too small. Make sure it\'s bigger or equal to'
F' {3 * down_scale_factor / self.unet.config.sample_rate}.' )
lowercase_ = int(UpperCAmelCase )
if sample_size % down_scale_factor != 0:
lowercase_ = (
(audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
) * down_scale_factor
logger.info(
F'{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled'
F' by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising'
" process." )
lowercase_ = int(UpperCAmelCase )
lowercase_ = next(iter(self.unet.parameters() ) ).dtype
lowercase_ = (batch_size, self.unet.config.in_channels, sample_size)
if isinstance(UpperCAmelCase , UpperCAmelCase ) and len(UpperCAmelCase ) != batch_size:
raise ValueError(
F'You have passed a list of generators of length {len(UpperCAmelCase )}, but requested an effective batch'
F' size of {batch_size}. Make sure the batch size matches the length of the generators.' )
lowercase_ = randn_tensor(UpperCAmelCase , generator=UpperCAmelCase , device=self.device , dtype=UpperCAmelCase )
# set step values
self.scheduler.set_timesteps(UpperCAmelCase , device=audio.device )
lowercase_ = self.scheduler.timesteps.to(UpperCAmelCase )
for t in self.progress_bar(self.scheduler.timesteps ):
# 1. predict noise model_output
lowercase_ = self.unet(UpperCAmelCase , UpperCAmelCase ).sample
# 2. compute previous sample: x_t -> x_t-1
lowercase_ = self.scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ).prev_sample
lowercase_ = audio.clamp(-1 , 1 ).float().cpu().numpy()
lowercase_ = audio[:, :, :original_sample_size]
if not return_dict:
return (audio,)
return AudioPipelineOutput(audios=UpperCAmelCase )
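# Worked example of the length rounding above: with 4 up blocks the UNet needs
# the sample length to be a multiple of 2**4 = 16 (the values are illustrative).
sample_rate, audio_length_in_s, down_scale_factor = 22050, 1.0, 2 ** 4
raw_len = int(audio_length_in_s * sample_rate)                   # 22050
padded = (raw_len // down_scale_factor + 1) * down_scale_factor  # 22064
assert padded % down_scale_factor == 0 and padded > raw_len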
| 297 |
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: int ):
'''simple docstring'''
return sum(i for i in range(1 , number // 2 + 1 ) if number % i == 0 ) == number
if __name__ == "__main__":
print("""Program to check whether a number is a Perfect number or not...""")
SCREAMING_SNAKE_CASE__ = int(input("""Enter number: """).strip())
print(f"""{number} is {'' if perfect(number) else 'not '}a Perfect Number.""")
| 297 | 1 |
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
| 192 |
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class _snake_case ( __snake_case , unittest.TestCase ):
'''simple docstring'''
A__ : Optional[Any] = CTRLTokenizer
A__ : Optional[Any] = False
A__ : str = False
def A__ ( self: Optional[int] ) -> List[Any]:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
UpperCAmelCase_ : Dict = ["""adapt""", """re@@""", """a@@""", """apt""", """c@@""", """t""", """<unk>"""]
UpperCAmelCase_ : Union[str, Any] = dict(zip(lowerCamelCase_ ,range(len(lowerCamelCase_ ) ) ) )
UpperCAmelCase_ : List[Any] = ["""#version: 0.2""", """a p""", """ap t</w>""", """r e""", """a d""", """ad apt</w>""", """"""]
UpperCAmelCase_ : Optional[Any] = {"""unk_token""": """<unk>"""}
UpperCAmelCase_ : Union[str, Any] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""vocab_file"""] )
UpperCAmelCase_ : Optional[Any] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file ,"""w""" ,encoding="""utf-8""" ) as fp:
fp.write(json.dumps(lowerCamelCase_ ) + """\n""" )
with open(self.merges_file ,"""w""" ,encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(lowerCamelCase_ ) )
def A__ ( self: Optional[int] ,**lowerCamelCase_: Any ) -> str:
kwargs.update(self.special_tokens_map )
return CTRLTokenizer.from_pretrained(self.tmpdirname ,**lowerCamelCase_ )
def A__ ( self: int ,lowerCamelCase_: int ) -> str:
UpperCAmelCase_ : List[str] = """adapt react readapt apt"""
UpperCAmelCase_ : List[Any] = """adapt react readapt apt"""
return input_text, output_text
def A__ ( self: Union[str, Any] ) -> Optional[int]:
UpperCAmelCase_ : Union[str, Any] = CTRLTokenizer(self.vocab_file ,self.merges_file ,**self.special_tokens_map )
UpperCAmelCase_ : List[Any] = """adapt react readapt apt"""
UpperCAmelCase_ : Optional[int] = """adapt re@@ a@@ c@@ t re@@ adapt apt""".split()
UpperCAmelCase_ : Tuple = tokenizer.tokenize(lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_ ,lowerCamelCase_ )
UpperCAmelCase_ : Union[str, Any] = tokens + [tokenizer.unk_token]
UpperCAmelCase_ : List[str] = [0, 1, 2, 4, 5, 1, 0, 3, 6]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase_ ) ,lowerCamelCase_ )
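# Generic sketch of a single BPE merge step like the ones the merges table
# above encodes (not the CTRL tokenizer internals): the given pair is fused
# wherever it occurs in the symbol sequence.
def apply_bpe_merge(symbols, pair):
    merged, i = [], 0
    while i < len(symbols):
        if i + 1 < len(symbols) and (symbols[i], symbols[i + 1]) == pair:
            merged.append(symbols[i] + symbols[i + 1])
            i += 2
        else:
            merged.append(symbols[i])
            i += 1
    return merged
assert apply_bpe_merge(["a", "d", "a", "p", "t"], ("a", "d")) == ["ad", "a", "p", "t"]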
| 345 | 0 |
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class UnCLIPTextProjModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        *,
        clip_extra_context_tokens: int = 4,
        clip_embeddings_dim: int = 768,
        time_embed_dim: int,
        cross_attention_dim,
    ):
        super().__init__()
        self.learned_classifier_free_guidance_embeddings = nn.Parameter(torch.zeros(clip_embeddings_dim))
        # parameters for additional clip time embeddings
        self.embedding_proj = nn.Linear(clip_embeddings_dim, time_embed_dim)
        self.clip_image_embeddings_project_to_time_embeddings = nn.Linear(clip_embeddings_dim, time_embed_dim)
        # parameters for encoder hidden states
        self.clip_extra_context_tokens = clip_extra_context_tokens
        self.clip_extra_context_tokens_proj = nn.Linear(
            clip_embeddings_dim, self.clip_extra_context_tokens * cross_attention_dim
        )
        self.encoder_hidden_states_proj = nn.Linear(clip_embeddings_dim, cross_attention_dim)
        self.text_encoder_hidden_states_norm = nn.LayerNorm(cross_attention_dim)

    def forward(self, *, image_embeddings, prompt_embeds, text_encoder_hidden_states, do_classifier_free_guidance):
        if do_classifier_free_guidance:
            # Add the classifier free guidance embeddings to the image embeddings
            image_embeddings_batch_size = image_embeddings.shape[0]
            classifier_free_guidance_embeddings = self.learned_classifier_free_guidance_embeddings.unsqueeze(0)
            classifier_free_guidance_embeddings = classifier_free_guidance_embeddings.expand(
                image_embeddings_batch_size, -1
            )
            image_embeddings = torch.cat([classifier_free_guidance_embeddings, image_embeddings], dim=0)
        # The image embeddings batch size and the text embeddings batch size are equal
        assert image_embeddings.shape[0] == prompt_embeds.shape[0]
        batch_size = prompt_embeds.shape[0]
        # "Specifically, we modify the architecture described in Nichol et al. (2021) by projecting and
        # adding CLIP embeddings to the existing timestep embedding, ...
        time_projected_prompt_embeds = self.embedding_proj(prompt_embeds)
        time_projected_image_embeddings = self.clip_image_embeddings_project_to_time_embeddings(image_embeddings)
        additive_clip_time_embeddings = time_projected_image_embeddings + time_projected_prompt_embeds
        # ... and by projecting CLIP embeddings into four
        # extra tokens of context that are concatenated to the sequence of outputs from the GLIDE text encoder"
        clip_extra_context_tokens = self.clip_extra_context_tokens_proj(image_embeddings)
        clip_extra_context_tokens = clip_extra_context_tokens.reshape(batch_size, -1, self.clip_extra_context_tokens)
        clip_extra_context_tokens = clip_extra_context_tokens.permute(0, 2, 1)
        text_encoder_hidden_states = self.encoder_hidden_states_proj(text_encoder_hidden_states)
        text_encoder_hidden_states = self.text_encoder_hidden_states_norm(text_encoder_hidden_states)
        text_encoder_hidden_states = torch.cat([clip_extra_context_tokens, text_encoder_hidden_states], dim=1)
        return text_encoder_hidden_states, additive_clip_time_embeddings
| 22 |
def generate_large_matrix() -> list[list[int]]:
    return [list(range(1000 - i, -1000 - i, -1)) for i in range(1000)]


grid = generate_large_matrix()
test_grids = (
    [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
    [[3, 2], [1, 0]],
    [[7, 7, 6]],
    [[7, 7, 6], [-1, -2, -3]],
    grid,
)


def validate_grid(grid: list[list[int]]) -> None:
    # Every row and every column must be sorted in decreasing order.
    assert all(row == sorted(row, reverse=True) for row in grid)
    assert all(list(col) == sorted(col, reverse=True) for col in zip(*grid))


def find_negative_index(array: list[int]) -> int:
    left = 0
    right = len(array) - 1
    # Edge cases such as no values or all numbers are negative.
    if not array or array[0] < 0:
        return 0
    while right + 1 > left:
        mid = (left + right) // 2
        num = array[mid]
        # Num must be negative and the index must be greater than or equal to 0.
        if num < 0 and array[mid - 1] >= 0:
            return mid
        if num >= 0:
            left = mid + 1
        else:
            right = mid - 1
    # No negative numbers so return the last index of the array + 1 which is the length.
    return len(array)


def count_negatives_binary_search(grid: list[list[int]]) -> int:
    total = 0
    bound = len(grid[0])
    for i in range(len(grid)):
        bound = find_negative_index(grid[i][:bound])
        total += bound
    return (len(grid) * len(grid[0])) - total


def count_negatives_brute_force(grid: list[list[int]]) -> int:
    return len([number for row in grid for number in row if number < 0])


def count_negatives_brute_force_with_break(grid: list[list[int]]) -> int:
    total = 0
    for row in grid:
        for i, number in enumerate(row):
            if number < 0:
                total += len(row) - i
                break
    return total


def benchmark() -> None:
    from timeit import timeit

    print("Running benchmarks")
    setup = (
        "from __main__ import count_negatives_binary_search, "
        "count_negatives_brute_force, count_negatives_brute_force_with_break, grid"
    )
    for func in (
        "count_negatives_binary_search",  # took 0.7727 seconds
        "count_negatives_brute_force_with_break",  # took 4.6505 seconds
        "count_negatives_brute_force",  # took 12.8160 seconds
    ):
        time = timeit(f"{func}(grid=grid)", setup=setup, number=500)
        print(f"{func}() took {time:0.4f} seconds")
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
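# Hedged usage sketch (mine): all three counting strategies must agree on a hand-written
# grid whose rows and columns are sorted in decreasing order.
example = [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]]
validate_grid(example)
assert find_negative_index(example[0]) == 3  # index of the first negative in the top row
assert (
    count_negatives_binary_search(example)
    == count_negatives_brute_force(example)
    == count_negatives_brute_force_with_break(example)
    == 8
)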
| 22 | 1 |
"""simple docstring"""
import sys
N = (
'''73167176531330624919225119674426574742355349194934'''
'''96983520312774506326239578318016984801869478851843'''
'''85861560789112949495459501737958331952853208805511'''
'''12540698747158523863050715693290963295227443043557'''
'''66896648950445244523161731856403098711121722383113'''
'''62229893423380308135336276614282806444486645238749'''
'''30358907296290491560440772390713810515859307960866'''
'''70172427121883998797908792274921901699720888093776'''
'''65727333001053367881220235421809751254540594752243'''
'''52584907711670556013604839586446706324415722155397'''
'''53697817977846174064955149290862569321978468622482'''
'''83972241375657056057490261407972968652414535100474'''
'''82166370484403199890008895243450658541227588666881'''
'''16427171479924442928230863465674813919123162824586'''
'''17866458359124566529476545682848912883142607690042'''
'''24219022671055626321111109370544217506941658960408'''
'''07198403850962455444362981230987879927244284909188'''
'''84580156166097919133875499200524063689912560717606'''
'''05886116467109405077541002256983155200055935729725'''
'''71636269561882670428252483600823257530420752963450'''
)
def str_eval(s: str) -> int:
    """Return the product of the digits of the string s."""
    product = 1
    for digit in s:
        product *= int(digit)
    return product


def solution(n: str = N) -> int:
    """Return the greatest product of thirteen adjacent digits in n."""
    largest_product = -sys.maxsize - 1
    substr = n[:13]
    cur_index = 13
    while cur_index < len(n) - 13:
        if int(n[cur_index]) >= int(substr[0]):
            substr = substr[1:] + n[cur_index]
            cur_index += 1
        else:
            largest_product = max(largest_product, str_eval(substr))
            substr = n[cur_index : cur_index + 13]
            cur_index += 13
    return largest_product
if __name__ == "__main__":
print(F'''{solution() = }''')
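# Hedged sanity check (mine): the digit-product helper on a tiny string, and the widely
# published Project Euler 8 answer for thirteen adjacent digits of the 1000-digit N.
assert str_eval("9989") == 9 * 9 * 8 * 9
assert solution() == 23514624000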
| 64 |
"""simple docstring"""
import argparse
import hashlib # hashlib is only used inside the Test class
import struct
class SHA1Hash:
    """Class to contain the entire pipeline for the SHA-1 hashing algorithm."""

    def __init__(self, data):
        self.data = data
        self.h = [0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476, 0xC3D2E1F0]

    @staticmethod
    def rotate(n, b):
        # Left-rotate the 32-bit integer n by b bits.
        return ((n << b) | (n >> (32 - b))) & 0xFFFFFFFF

    def padding(self):
        # Pad the message so its length is a multiple of 64 bytes, appending the
        # original bit length as a big-endian 64-bit integer.
        padding = b"\x80" + b"\x00" * (63 - (len(self.data) + 8) % 64)
        padded_data = self.data + padding + struct.pack(">Q", 8 * len(self.data))
        return padded_data

    def split_blocks(self):
        # Split the padded message into 512-bit (64-byte) blocks.
        return [
            self.padded_data[i : i + 64] for i in range(0, len(self.padded_data), 64)
        ]

    def expand_block(self, block):
        # Expand a 64-byte block into eighty 32-bit words.
        w = list(struct.unpack(">16L", block)) + [0] * 64
        for i in range(16, 80):
            w[i] = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]), 1)
        return w

    def final_hash(self):
        self.padded_data = self.padding()
        self.blocks = self.split_blocks()
        for block in self.blocks:
            expanded_block = self.expand_block(block)
            a, b, c, d, e = self.h
            for i in range(0, 80):
                if 0 <= i < 20:
                    f = (b & c) | ((~b) & d)
                    k = 0x5A827999
                elif 20 <= i < 40:
                    f = b ^ c ^ d
                    k = 0x6ED9EBA1
                elif 40 <= i < 60:
                    f = (b & c) | (b & d) | (c & d)
                    k = 0x8F1BBCDC
                elif 60 <= i < 80:
                    f = b ^ c ^ d
                    k = 0xCA62C1D6
                a, b, c, d, e = (
                    self.rotate(a, 5) + f + e + k + expanded_block[i] & 0xFFFFFFFF,
                    a,
                    self.rotate(b, 30),
                    c,
                    d,
                )
            self.h = (
                self.h[0] + a & 0xFFFFFFFF,
                self.h[1] + b & 0xFFFFFFFF,
                self.h[2] + c & 0xFFFFFFFF,
                self.h[3] + d & 0xFFFFFFFF,
                self.h[4] + e & 0xFFFFFFFF,
            )
        return ("{:08x}" * 5).format(*self.h)


def test_sha1_hash():
    msg = b"Test String"
    assert SHA1Hash(msg).final_hash() == hashlib.sha1(msg).hexdigest()  # noqa: S324


def main():
    parser = argparse.ArgumentParser(description="Process some strings or files")
    parser.add_argument(
        "--string",
        dest="input_string",
        default="Hello World!! Welcome to Cryptography",
        help="Hash the string",
    )
    parser.add_argument("--file", dest="input_file", help="Hash contents of a file")
    args = parser.parse_args()
    input_string = args.input_string
    # In any case hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, "rb") as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string, "utf-8")
    print(SHA1Hash(hash_input).final_hash())
if __name__ == "__main__":
main()
import doctest
doctest.testmod()
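# Hedged usage sketch (mine): the pure-Python class agrees with hashlib on the classic
# "abc" test vector from FIPS 180-1.
assert (
    SHA1Hash(b"abc").final_hash()
    == hashlib.sha1(b"abc").hexdigest()
    == "a9993e364706816aba3e25717850c26c9cd0d89d"
)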
| 64 | 1 |
"""simple docstring"""
import re
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class DonutProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "AutoImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor
        self._in_target_context_manager = False

    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        images = kwargs.pop("images", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            images = args[0]
            args = args[1:]

        if images is None and text is None:
            raise ValueError("You need to specify either an `images` or `text` input to process.")

        if images is not None:
            inputs = self.image_processor(images, *args, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your images inputs, or in a separate call."
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.image_processor
        self._in_target_context_manager = False

    def token2json(self, tokens, is_inner_value=False, added_vocab=None):
        """Convert a (generated) token sequence into an ordered JSON format."""
        if added_vocab is None:
            added_vocab = self.tokenizer.get_added_vocab()

        output = {}
        while tokens:
            start_token = re.search(r"<s_(.*?)>", tokens, re.IGNORECASE)
            if start_token is None:
                break
            key = start_token.group(1)
            end_token = re.search(rf"</s_{key}>", tokens, re.IGNORECASE)
            start_token = start_token.group()
            if end_token is None:
                tokens = tokens.replace(start_token, "")
            else:
                end_token = end_token.group()
                start_token_escaped = re.escape(start_token)
                end_token_escaped = re.escape(end_token)
                content = re.search(f"{start_token_escaped}(.*?){end_token_escaped}", tokens, re.IGNORECASE)
                if content is not None:
                    content = content.group(1).strip()
                    if r"<s_" in content and r"</s_" in content:  # non-leaf node
                        value = self.token2json(content, is_inner_value=True, added_vocab=added_vocab)
                        if value:
                            if len(value) == 1:
                                value = value[0]
                            output[key] = value
                    else:  # leaf nodes
                        output[key] = []
                        for leaf in content.split(r"<sep/>"):
                            leaf = leaf.strip()
                            if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
                                leaf = leaf[1:-2]  # for categorical special tokens
                            output[key].append(leaf)
                        if len(output[key]) == 1:
                            output[key] = output[key][0]
                tokens = tokens[tokens.find(end_token) + len(end_token) :].strip()
                if tokens[:6] == r"<sep/>":  # non-leaf nodes
                    return [output] + self.token2json(tokens[6:], is_inner_value=True, added_vocab=added_vocab)

        if len(output):
            return [output] if is_inner_value else output
        else:
            return [] if is_inner_value else {"text_sequence": tokens}

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
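# Hedged illustration (mine, values invented) of the token2json contract: given a
# Donut-style tagged sequence, the method rebuilds the nested JSON it encodes.
#
#   sequence = "<s_menu><s_name>Latte</s_name><s_price>4.50</s_price></s_menu>"
#   processor.token2json(sequence, added_vocab=[])
#   -> {"menu": {"name": "Latte", "price": "4.50"}}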
| 341 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CVT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/cvt-13": "https://huggingface.co/microsoft/cvt-13/resolve/main/config.json",
    # See all Cvt models at https://huggingface.co/models?filter=cvt
}


class CvtConfig(PretrainedConfig):
    model_type = "cvt"

    def __init__(
        self,
        num_channels=3,
        patch_sizes=[7, 3, 3],
        patch_stride=[4, 2, 2],
        patch_padding=[2, 1, 1],
        embed_dim=[64, 192, 384],
        num_heads=[1, 3, 6],
        depth=[1, 2, 10],
        mlp_ratio=[4.0, 4.0, 4.0],
        attention_drop_rate=[0.0, 0.0, 0.0],
        drop_rate=[0.0, 0.0, 0.0],
        drop_path_rate=[0.0, 0.0, 0.1],
        qkv_bias=[True, True, True],
        cls_token=[False, False, True],
        qkv_projection_method=["dw_bn", "dw_bn", "dw_bn"],
        kernel_qkv=[3, 3, 3],
        padding_kv=[1, 1, 1],
        stride_kv=[2, 2, 2],
        padding_q=[1, 1, 1],
        stride_q=[1, 1, 1],
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.depth = depth
        self.mlp_ratio = mlp_ratio
        self.attention_drop_rate = attention_drop_rate
        self.drop_rate = drop_rate
        self.drop_path_rate = drop_path_rate
        self.qkv_bias = qkv_bias
        self.cls_token = cls_token
        self.qkv_projection_method = qkv_projection_method
        self.kernel_qkv = kernel_qkv
        self.padding_kv = padding_kv
        self.stride_kv = stride_kv
        self.padding_q = padding_q
        self.stride_q = stride_q
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
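# Hedged usage sketch (mine): the default arguments mirror the microsoft/cvt-13 checkpoint.
config = CvtConfig()
assert config.model_type == "cvt"
assert config.embed_dim == [64, 192, 384]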
| 341 | 1 |
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
@pytest.mark.parametrize(
    "files",
    [
        ["full:README.md", "dataset_infos.json"],
        ["empty:README.md", "dataset_infos.json"],
        ["dataset_infos.json"],
        ["full:README.md"],
    ],
)
def test_from_dir(files, tmp_path_factory):
    dataset_infos_dir = tmp_path_factory.mktemp("dset_infos_dir")
    if "full:README.md" in files:
        with open(dataset_infos_dir / "README.md", "w") as f:
            f.write("---\ndataset_info:\n  dataset_size: 42\n---")
    if "empty:README.md" in files:
        with open(dataset_infos_dir / "README.md", "w") as f:
            f.write("")
    # we want to support dataset_infos.json for backward compatibility
    if "dataset_infos.json" in files:
        with open(dataset_infos_dir / "dataset_infos.json", "w") as f:
            f.write('{"default": {"dataset_size": 42}}')
    dataset_infos = DatasetInfosDict.from_directory(dataset_infos_dir)
    assert dataset_infos
    assert dataset_infos["default"].dataset_size == 42


@pytest.mark.parametrize(
    "dataset_info",
    [
        DatasetInfo(),
        DatasetInfo(
            description="foo",
            features=Features({"a": Value("int32")}),
            builder_name="builder",
            config_name="config",
            version="1.0.0",
            splits=[{"name": "train"}],
            download_size=42,
        ),
    ],
)
def test_dataset_info_dump_and_reload(tmp_path, dataset_info):
    tmp_path = str(tmp_path)
    dataset_info.write_to_directory(tmp_path)
    reloaded = DatasetInfo.from_directory(tmp_path)
    assert dataset_info == reloaded
    assert os.path.exists(os.path.join(tmp_path, "dataset_info.json"))


def test_dataset_info_to_yaml_dict():
    dataset_info = DatasetInfo(
        description="foo",
        citation="bar",
        homepage="https://foo.bar",
        license="CC0",
        features=Features({"a": Value("int32")}),
        post_processed={},
        supervised_keys=(),
        task_templates=[],
        builder_name="builder",
        config_name="config",
        version="1.0.0",
        splits=[{"name": "train", "num_examples": 42}],
        download_checksums={},
        download_size=1337,
        post_processing_size=442,
        dataset_size=1234,
        size_in_bytes=1337 + 442 + 1234,
    )
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert sorted(dataset_info_yaml_dict) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML)
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        assert key in dataset_info_yaml_dict
        assert isinstance(dataset_info_yaml_dict[key], (list, dict, int, str))
    dataset_info_yaml = yaml.safe_dump(dataset_info_yaml_dict)
    reloaded = yaml.safe_load(dataset_info_yaml)
    assert dataset_info_yaml_dict == reloaded


def test_dataset_info_to_yaml_dict_empty():
    dataset_info = DatasetInfo()
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert dataset_info_yaml_dict == {}


@pytest.mark.parametrize(
    "dataset_infos_dict",
    [
        DatasetInfosDict(),
        DatasetInfosDict({"default": DatasetInfo()}),
        DatasetInfosDict({"my_config_name": DatasetInfo()}),
        DatasetInfosDict(
            {
                "default": DatasetInfo(
                    description="foo",
                    features=Features({"a": Value("int32")}),
                    builder_name="builder",
                    config_name="config",
                    version="1.0.0",
                    splits=[{"name": "train"}],
                    download_size=42,
                )
            }
        ),
        DatasetInfosDict(
            {
                "v1": DatasetInfo(dataset_size=42),
                "v2": DatasetInfo(dataset_size=1337),
            }
        ),
    ],
)
def test_dataset_infos_dict_dump_and_reload(tmp_path, dataset_infos_dict):
    tmp_path = str(tmp_path)
    dataset_infos_dict.write_to_directory(tmp_path)
    reloaded = DatasetInfosDict.from_directory(tmp_path)
    # the config_name of the dataset_infos_dict takes over the attribute
    for config_name, dataset_info in dataset_infos_dict.items():
        dataset_info.config_name = config_name
        # the yaml representation doesn't include fields like description or citation
        # so we just test that we can recover what we can from the yaml
        dataset_infos_dict[config_name] = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict())
    assert dataset_infos_dict == reloaded
    if dataset_infos_dict:
        assert os.path.exists(os.path.join(tmp_path, "README.md"))
| 232 |
import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
logger = logging.get_logger(__name__)


class Conversation:
    """Utility class containing a conversation and its history."""

    def __init__(
        self, text: str = None, conversation_id: uuid.UUID = None, past_user_inputs=None, generated_responses=None
    ):
        if not conversation_id:
            conversation_id = uuid.uuid4()
        if past_user_inputs is None:
            past_user_inputs = []
        if generated_responses is None:
            generated_responses = []
        self.uuid: uuid.UUID = conversation_id
        self.past_user_inputs: List[str] = past_user_inputs
        self.generated_responses: List[str] = generated_responses
        self.new_user_input: Optional[str] = text

    def __eq__(self, other):
        if not isinstance(other, Conversation):
            return False
        if self.uuid == other.uuid:
            return True
        return (
            self.new_user_input == other.new_user_input
            and self.past_user_inputs == other.past_user_inputs
            and self.generated_responses == other.generated_responses
        )

    def add_user_input(self, text: str, overwrite: bool = False):
        if self.new_user_input:
            if overwrite:
                logger.warning(
                    f'User input added while unprocessed input was existing: "{self.new_user_input}" was overwritten '
                    f'with: "{text}".'
                )
                self.new_user_input = text
            else:
                logger.warning(
                    f'User input added while unprocessed input was existing: "{self.new_user_input}" new input '
                    f'ignored: "{text}". Set `overwrite` to True to overwrite unprocessed user input'
                )
        else:
            self.new_user_input = text

    def mark_processed(self):
        if self.new_user_input:
            self.past_user_inputs.append(self.new_user_input)
        self.new_user_input = None

    def append_response(self, response: str):
        self.generated_responses.append(response)

    def iter_texts(self):
        for user_input, generated_response in zip(self.past_user_inputs, self.generated_responses):
            yield True, user_input
            yield False, generated_response
        if self.new_user_input:
            yield True, self.new_user_input

    def __repr__(self):
        output = f"Conversation id: {self.uuid} \n"
        for is_user, text in self.iter_texts():
            name = "user" if is_user else "bot"
            output += f"{name} >> {text} \n"
        return output
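# Hedged usage sketch (mine) of the Conversation container on its own: a pending user
# input is queued, then marked processed once the model has replied.
conversation = Conversation("Hi there!")
conversation.add_user_input("Ignored until processed", overwrite=False)  # logs a warning
conversation.mark_processed()
conversation.append_response("Hello! How can I help?")
assert conversation.past_user_inputs == ["Hi there!"]
assert conversation.generated_responses == ["Hello! How can I help?"]
print(conversation)  # Conversation id: ...  user >> Hi there!  bot >> Hello! How can I help?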
@add_end_docstrings(
    PIPELINE_INIT_ARGS,
    r"""
        min_length_for_response (`int`, *optional*, defaults to 32):
            The minimum length (in number of tokens) for a response.
        minimum_tokens (`int`, *optional*, defaults to 10):
            The minimum length of tokens to leave for a response.
    """,
)
class ConversationalPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        if self.tokenizer.pad_token_id is None:
            self.tokenizer.pad_token = self.tokenizer.eos_token

    def _sanitize_parameters(
        self, min_length_for_response=None, minimum_tokens=None, clean_up_tokenization_spaces=None, **generate_kwargs
    ):
        preprocess_params = {}
        forward_params = {}
        postprocess_params = {}
        if min_length_for_response is not None:
            preprocess_params["min_length_for_response"] = min_length_for_response
        if minimum_tokens is not None:
            forward_params["minimum_tokens"] = minimum_tokens
        if "max_length" in generate_kwargs:
            forward_params["max_length"] = generate_kwargs["max_length"]
            # self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces
        if generate_kwargs:
            forward_params.update(generate_kwargs)
        return preprocess_params, forward_params, postprocess_params

    def __call__(self, conversations: Union[Conversation, List[Conversation]], num_workers=0, **kwargs):
        outputs = super().__call__(conversations, num_workers=num_workers, **kwargs)
        if isinstance(outputs, list) and len(outputs) == 1:
            return outputs[0]
        return outputs

    def preprocess(self, conversation: Conversation, min_length_for_response=32) -> Dict[str, Any]:
        if not isinstance(conversation, Conversation):
            raise ValueError("ConversationalPipeline, expects Conversation as inputs")
        if conversation.new_user_input is None:
            raise ValueError(
                f"Conversation with UUID {type(conversation.uuid)} does not contain new user input to process. "
                "Add user inputs with the conversation's `add_user_input` method"
            )
        if hasattr(self.tokenizer, "_build_conversation_input_ids"):
            input_ids = self.tokenizer._build_conversation_input_ids(conversation)
        else:
            # If the tokenizer cannot handle conversations, we default to only the old version
            input_ids = self._legacy_parse_and_tokenize(conversation)
        if self.framework == "pt":
            input_ids = torch.LongTensor([input_ids])
        elif self.framework == "tf":
            input_ids = tf.constant([input_ids])
        return {"input_ids": input_ids, "conversation": conversation}

    def _forward(self, model_inputs, minimum_tokens=10, **generate_kwargs):
        max_length = generate_kwargs.get("max_length", self.model.config.max_length)
        n = model_inputs["input_ids"].shape[1]
        if max_length - minimum_tokens < n:
            logger.warning(f"Conversation input is too long ({n}), trimming it to ({max_length} - {minimum_tokens})")
            trim = max_length - minimum_tokens
            model_inputs["input_ids"] = model_inputs["input_ids"][:, -trim:]
            if "attention_mask" in model_inputs:
                model_inputs["attention_mask"] = model_inputs["attention_mask"][:, -trim:]
        conversation = model_inputs.pop("conversation")
        generate_kwargs["max_length"] = max_length
        output_ids = self.model.generate(**model_inputs, **generate_kwargs)
        if self.model.config.is_encoder_decoder:
            start_position = 1
        else:
            start_position = n
        return {"output_ids": output_ids[:, start_position:], "conversation": conversation}

    def postprocess(self, model_outputs, clean_up_tokenization_spaces=True):
        output_ids = model_outputs["output_ids"]
        answer = self.tokenizer.decode(
            output_ids[0],
            skip_special_tokens=True,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
        )
        conversation = model_outputs["conversation"]
        conversation.mark_processed()
        conversation.append_response(answer)
        return conversation

    def _legacy_parse_and_tokenize(self, conversation: Conversation) -> List[int]:
        eos_token_id = self.tokenizer.eos_token_id
        input_ids = []
        for is_user, text in conversation.iter_texts():
            if eos_token_id is not None:
                input_ids.extend(self.tokenizer.encode(text, add_special_tokens=False) + [eos_token_id])
            else:
                input_ids.extend(self.tokenizer.encode(text, add_special_tokens=False))
        if len(input_ids) > self.tokenizer.model_max_length:
            input_ids = input_ids[-self.tokenizer.model_max_length :]
        return input_ids
| 232 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
    "configuration_falcon": ["FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP", "FalconConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_falcon"] = [
        "FALCON_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FalconForCausalLM",
        "FalconModel",
        "FalconPreTrainedModel",
        "FalconForSequenceClassification",
        "FalconForTokenClassification",
        "FalconForQuestionAnswering",
    ]
if TYPE_CHECKING:
from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_falcon import (
FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
FalconPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 350 |
'''simple docstring'''
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(__file__), "src"))
sys.path.insert(1, git_repo_path)

# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)


def pytest_configure(config):
    config.addinivalue_line(
        "markers", "is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested"
    )
    config.addinivalue_line(
        "markers", "is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested"
    )
    config.addinivalue_line("markers", "is_pipeline_test: mark test to run only when pipelines are tested")
    config.addinivalue_line("markers", "is_staging_test: mark test to run only in the staging environment")
    config.addinivalue_line("markers", "accelerate_tests: mark test that require accelerate")
    config.addinivalue_line("markers", "tool_tests: mark the tool tests that are run on their specific schedule")


def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)


def pytest_sessionfinish(session, exitstatus):
    # If no tests are collected, pytest exits with code 5; treat that as success in CI.
    if exitstatus == 5:
        session.exitstatus = 0


# Doctest custom flag to ignore output.
IGNORE_RESULT = doctest.register_optionflag("IGNORE_RESULT")

OutputChecker = doctest.OutputChecker


class CustomOutputChecker(OutputChecker):
    def check_output(self, want, got, optionflags):
        if IGNORE_RESULT & optionflags:
            return True
        return OutputChecker.check_output(self, want, got, optionflags)


doctest.OutputChecker = CustomOutputChecker
_pytest.doctest.DoctestModule = HfDoctestModule
doctest.DocTestParser = HfDocTestParser
from collections import OrderedDict
from typing import List, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/efficientnet-b7": "https://huggingface.co/google/efficientnet-b7/resolve/main/config.json",
}


class EfficientNetConfig(PretrainedConfig):
    model_type = "efficientnet"

    def __init__(
        self,
        num_channels: int = 3,
        image_size: int = 600,
        width_coefficient: float = 2.0,
        depth_coefficient: float = 3.1,
        depth_divisor: int = 8,
        kernel_sizes: List[int] = [3, 3, 5, 3, 5, 5, 3],
        in_channels: List[int] = [32, 16, 24, 40, 80, 112, 192],
        out_channels: List[int] = [16, 24, 40, 80, 112, 192, 320],
        depthwise_padding: List[int] = [],
        strides: List[int] = [1, 2, 2, 2, 1, 2, 1],
        num_block_repeats: List[int] = [1, 2, 2, 3, 3, 4, 1],
        expand_ratios: List[int] = [1, 6, 6, 6, 6, 6, 6],
        squeeze_expansion_ratio: float = 0.25,
        hidden_act: str = "swish",
        hidden_dim: int = 2560,
        pooling_type: str = "mean",
        initializer_range: float = 0.02,
        batch_norm_eps: float = 0.001,
        batch_norm_momentum: float = 0.99,
        dropout_rate: float = 0.5,
        drop_connect_rate: float = 0.2,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.dropout_rate = dropout_rate
        self.drop_connect_rate = drop_connect_rate
        self.num_hidden_layers = sum(num_block_repeats) * 4


class EfficientNetOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5
| 210 |
"""simple docstring"""
def is_pentagonal(n: int) -> bool:
    """A number is pentagonal if (1 + sqrt(1 + 24 * n)) / 6 is an integer."""
    root = (1 + 24 * n) ** 0.5
    return ((1 + root) / 6) % 1 == 0


def solution(limit: int = 5000) -> int:
    """Find the pair of pentagonal numbers whose sum and difference are pentagonal,
    and return the (minimised) difference."""
    pentagonal_nums = [(i * (3 * i - 1)) // 2 for i in range(1, limit)]
    for i, pentagonal_i in enumerate(pentagonal_nums):
        for j in range(i, len(pentagonal_nums)):
            pentagonal_j = pentagonal_nums[j]
            a = pentagonal_i + pentagonal_j
            b = pentagonal_j - pentagonal_i
            if is_pentagonal(a) and is_pentagonal(b):
                return b
    return -1
if __name__ == "__main__":
print(F"""{solution() = }""")
| 291 | 0 |
import argparse
import requests
import torch
from PIL import Image
from transformers import SwinConfig, SwinForMaskedImageModeling, ViTImageProcessor
def get_swin_config(model_name):
    config = SwinConfig(image_size=192)
    if "base" in model_name:
        window_size = 6
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    elif "large" in model_name:
        window_size = 12
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
    else:
        raise ValueError("Model not supported, only supports base and large variants")
    config.window_size = window_size
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads
    return config


def rename_key(name):
    if "encoder.mask_token" in name:
        name = name.replace("encoder.mask_token", "embeddings.mask_token")
    if "encoder.patch_embed.proj" in name:
        name = name.replace("encoder.patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "encoder.patch_embed.norm" in name:
        name = name.replace("encoder.patch_embed.norm", "embeddings.norm")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if name == "encoder.norm.weight":
        name = "layernorm.weight"
    if name == "encoder.norm.bias":
        name = "layernorm.bias"
    if "decoder" in name:
        pass
    else:
        name = "swin." + name
    return name


def convert_state_dict(orig_state_dict, model):
    # The target key names below follow the HF Swin naming scheme (reconstructed here,
    # since the original left-hand sides were lost in this copy).
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if "attn_mask" in key:
            pass
        elif "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[2])
            block_num = int(key_split[4])
            dim = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
            prefix = f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self"
            if "weight" in key:
                orig_state_dict[f"{prefix}.query.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}.key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"{prefix}.query.bias"] = val[:dim]
                orig_state_dict[f"{prefix}.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val
    return orig_state_dict


def convert_swin_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub):
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    config = get_swin_config(model_name)
    model = SwinForMaskedImageModeling(config)
    model.eval()

    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image_processor = ViTImageProcessor(size={"height": 192, "width": 192})
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors="pt")

    with torch.no_grad():
        outputs = model(**inputs)  # keep the full ModelOutput so .keys() below is valid

    print(outputs.keys())
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving image processor to {pytorch_dump_folder_path}")
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and image processor for {model_name} to hub")
        model.push_to_hub(f"microsoft/{model_name}")
        image_processor.push_to_hub(f"microsoft/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''swin-base-simmim-window6-192''',
type=str,
choices=['''swin-base-simmim-window6-192''', '''swin-large-simmim-window12-192'''],
help='''Name of the Swin SimMIM model you\'d like to convert.''',
)
parser.add_argument(
'''--checkpoint_path''',
default='''/Users/nielsrogge/Documents/SwinSimMIM/simmim_pretrain__swin_base__img192_window6__100ep.pth''',
type=str,
help='''Path to the original PyTorch checkpoint (.pth file).''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
    args = parser.parse_args()
convert_swin_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
| 335 |
from ...configuration_utils import PretrainedConfig
TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''google/tapas-base-finetuned-sqa''': (
'''https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json'''
),
'''google/tapas-base-finetuned-wtq''': (
'''https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json'''
),
'''google/tapas-base-finetuned-wikisql-supervised''': (
'''https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json'''
),
'''google/tapas-base-finetuned-tabfact''': (
'''https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json'''
),
}
class TapasConfig(PretrainedConfig):
    model_type = "tapas"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1024,
        type_vocab_sizes=[3, 256, 256, 2, 256, 256, 10],
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        positive_label_weight=10.0,
        num_aggregation_labels=0,
        aggregation_loss_weight=1.0,
        use_answer_as_supervision=None,
        answer_loss_importance=1.0,
        use_normalized_answer_loss=False,
        huber_loss_delta=None,
        temperature=1.0,
        aggregation_temperature=1.0,
        use_gumbel_for_cells=False,
        use_gumbel_for_aggregation=False,
        average_approximation_function="ratio",
        cell_selection_preference=None,
        answer_loss_cutoff=None,
        max_num_rows=64,
        max_num_columns=32,
        average_logits_per_cell=False,
        select_one_column=True,
        allow_empty_column_selection=False,
        init_cell_selection_weights_to_zero=False,
        reset_position_index_per_cell=True,
        disable_per_token_loss=False,
        aggregation_labels=None,
        no_aggregation_label_index=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        # BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_sizes = type_vocab_sizes
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        # Fine-tuning task hyperparameters
        self.positive_label_weight = positive_label_weight
        self.num_aggregation_labels = num_aggregation_labels
        self.aggregation_loss_weight = aggregation_loss_weight
        self.use_answer_as_supervision = use_answer_as_supervision
        self.answer_loss_importance = answer_loss_importance
        self.use_normalized_answer_loss = use_normalized_answer_loss
        self.huber_loss_delta = huber_loss_delta
        self.temperature = temperature
        self.aggregation_temperature = aggregation_temperature
        self.use_gumbel_for_cells = use_gumbel_for_cells
        self.use_gumbel_for_aggregation = use_gumbel_for_aggregation
        self.average_approximation_function = average_approximation_function
        self.cell_selection_preference = cell_selection_preference
        self.answer_loss_cutoff = answer_loss_cutoff
        self.max_num_rows = max_num_rows
        self.max_num_columns = max_num_columns
        self.average_logits_per_cell = average_logits_per_cell
        self.select_one_column = select_one_column
        self.allow_empty_column_selection = allow_empty_column_selection
        self.init_cell_selection_weights_to_zero = init_cell_selection_weights_to_zero
        self.reset_position_index_per_cell = reset_position_index_per_cell
        self.disable_per_token_loss = disable_per_token_loss
        # Aggregation hyperparameters
        self.aggregation_labels = aggregation_labels
        self.no_aggregation_label_index = no_aggregation_label_index
        if isinstance(self.aggregation_labels, dict):
            self.aggregation_labels = {int(k): v for k, v in aggregation_labels.items()}
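# Hedged usage sketch (mine, values illustrative): configuring TAPAS for weakly supervised
# question answering with aggregation, roughly what the WTQ fine-tuned checkpoints use.
config = TapasConfig(num_aggregation_labels=4, use_answer_as_supervision=True, answer_loss_cutoff=0.664)
assert config.select_one_column and config.max_num_rows == 64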
| 335 | 1 |
'''simple docstring'''
import inspect
import unittest
from transformers import SegformerConfig, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_MAPPING,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerModel,
)
from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import SegformerImageProcessor
class SegformerConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "num_attention_heads"))
        self.parent.assertTrue(hasattr(config, "num_encoder_blocks"))


class SegformerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[16, 32, 64, 128],
        downsampling_rates=[1, 4, 8, 16],
        num_attention_heads=[1, 2, 4, 8],
        is_training=True,
        use_labels=True,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.sr_ratios = sr_ratios
        self.depths = depths
        self.hidden_sizes = hidden_sizes
        self.downsampling_rates = downsampling_rates
        self.num_attention_heads = num_attention_heads
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return SegformerConfig(
            image_size=self.image_size,
            num_channels=self.num_channels,
            num_encoder_blocks=self.num_encoder_blocks,
            depths=self.depths,
            hidden_sizes=self.hidden_sizes,
            num_attention_heads=self.num_attention_heads,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = SegformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        expected_height = expected_width = self.image_size // (self.downsampling_rates[-1] * 2)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width)
        )

    def create_and_check_for_image_segmentation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = SegformerForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4)
        )
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4)
        )
        self.parent.assertGreater(result.loss, 0.0)

    def create_and_check_for_binary_image_segmentation(self, config, pixel_values, labels):
        config.num_labels = 1
        model = SegformerForSemanticSegmentation(config=config)
        model.to(torch_device)
        model.eval()
        labels = torch.randint(0, 1, (self.batch_size, self.image_size, self.image_size)).to(torch_device)
        result = model(pixel_values, labels=labels)
        self.parent.assertGreater(result.loss, 0.0)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class SegformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            SegformerModel,
            SegformerForSemanticSegmentation,
            SegformerForImageClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": SegformerModel,
            "image-classification": SegformerForImageClassification,
            "image-segmentation": SegformerForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    fx_compatible = True
    test_head_masking = False
    test_pruning = False
    test_resize_embeddings = False

    def setUp(self):
        self.model_tester = SegformerModelTester(self)
        self.config_tester = SegformerConfigTester(self, config_class=SegformerConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_binary_image_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_binary_image_segmentation(*config_and_inputs)

    def test_image_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_segmentation(*config_and_inputs)

    @unittest.skip("SegFormer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip("SegFormer does not have get_input_embeddings method and get_output_embeddings methods")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            expected_num_attentions = sum(self.model_tester.depths)
            self.assertEqual(len(attentions), expected_num_attentions)
            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), expected_num_attentions)
            # verify the first attentions (first block, first layer)
            expected_seq_len = (self.model_tester.image_size // 4) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len],
            )
            # verify the last attentions (last block, last layer)
            expected_seq_len = (self.model_tester.image_size // 32) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (32 * self.model_tester.sr_ratios[-1])) ** 2
            self.assertListEqual(
                list(attentions[-1].shape[-3:]),
                [self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len],
            )
            out_len = len(outputs)
            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(out_len + 1, len(outputs))
            self_attentions = outputs.attentions
            self.assertEqual(len(self_attentions), expected_num_attentions)
            # verify the first attentions (first block, first layer)
            expected_seq_len = (self.model_tester.image_size // 4) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len],
            )

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.hidden_states
            expected_num_layers = self.model_tester.num_encoder_blocks
            self.assertEqual(len(hidden_states), expected_num_layers)
            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-3:]),
                [
                    self.model_tester.hidden_sizes[0],
                    self.model_tester.image_size // 4,
                    self.model_tester.image_size // 4,
                ],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_training(self):
        if not self.model_tester.is_training:
            return
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        for model_class in self.all_model_classes:
            if model_class in get_values(MODEL_MAPPING):
                continue
            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SegformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
class SegformerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_image_segmentation_ade(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
        )
        model = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512").to(
            torch_device
        )
        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = encoded_inputs.pixel_values.to(torch_device)
        with torch.no_grad():
            outputs = model(pixel_values)
        expected_shape = torch.Size((1, model.config.num_labels, 128, 128))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor(
            [
                [[-4.6310, -5.5232, -6.2356], [-5.1921, -6.1444, -6.5996], [-5.4424, -6.2790, -6.7574]],
                [[-12.1391, -13.3122, -13.9554], [-12.8732, -13.9352, -14.3563], [-12.9438, -13.8226, -14.2513]],
                [[-12.5134, -13.4686, -14.4915], [-12.8669, -14.4343, -14.7758], [-13.2523, -14.5819, -15.0694]],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_image_segmentation_city(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
        )
        model = SegformerForSemanticSegmentation.from_pretrained(
            "nvidia/segformer-b1-finetuned-cityscapes-1024-1024"
        ).to(torch_device)
        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = encoded_inputs.pixel_values.to(torch_device)
        with torch.no_grad():
            outputs = model(pixel_values)
        expected_shape = torch.Size((1, model.config.num_labels, 128, 128))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor(
            [
                [[-13.5748, -13.9111, -12.6500], [-14.3500, -15.3683, -14.2328], [-14.7532, -16.0424, -15.6087]],
                [[-17.1651, -15.8725, -12.9653], [-17.2580, -17.3718, -14.8223], [-16.6058, -16.8783, -16.7452]],
                [[-3.6456, -3.0209, -1.4203], [-3.0797, -3.1959, -2.0000], [-1.8757, -1.9217, -1.6997]],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3], expected_slice, atol=1e-1))

    @slow
    def test_post_processing_semantic_segmentation(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
        )
        model = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512").to(
            torch_device
        )
        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = encoded_inputs.pixel_values.to(torch_device)
        with torch.no_grad():
            outputs = model(pixel_values)
        outputs.logits = outputs.logits.detach().cpu()
        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(500, 300)])
        expected_shape = torch.Size((500, 300))
        self.assertEqual(segmentation[0].shape, expected_shape)
        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((128, 128))
        self.assertEqual(segmentation[0].shape, expected_shape)
| 158 |
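The test above runs SegformerForSemanticSegmentation end to end, but the repeated `_lowerCAmelCase` placeholders stand in for boolean flags and the target device, so the cell does not run as written. A minimal, readable sketch of the same inference check (assumes network access for the checkpoint; the fixture path is taken from the snippet itself):

```python
import torch
from PIL import Image
from transformers import SegformerForSemanticSegmentation, SegformerImageProcessor

device = "cuda" if torch.cuda.is_available() else "cpu"
checkpoint = "nvidia/segformer-b0-finetuned-ade-512-512"
processor = SegformerImageProcessor.from_pretrained(checkpoint)
model = SegformerForSemanticSegmentation.from_pretrained(checkpoint).to(device)

image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = processor(images=image, return_tensors="pt").to(device)
with torch.no_grad():
    outputs = model(**inputs)

# SegFormer predicts logits at 1/4 of the input resolution: 512 / 4 = 128
assert outputs.logits.shape == (1, model.config.num_labels, 128, 128)
```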
'''simple docstring'''
import argparse
from transformers import TaConfig, TaForConditionalGeneration, load_tf_weights_in_ta
from transformers.utils import logging
logging.set_verbosity_info()
def __a(SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : int ):
'''simple docstring'''
_lowerCAmelCase = TaConfig.from_json_file(SCREAMING_SNAKE_CASE_ )
print(F'''Building PyTorch model from configuration: {config}''' )
_lowerCAmelCase = TaForConditionalGeneration(SCREAMING_SNAKE_CASE_ )
# Load weights from tf checkpoint
load_tf_weights_in_ta(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Save pytorch-model
print(F'''Save PyTorch model to {pytorch_dump_path}''' )
model.save_pretrained(SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
_SCREAMING_SNAKE_CASE = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
| 158 | 1 |
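With the randomized identifiers undone (the collapsed parameter names above would not even parse), the script is the standard TF-to-PyTorch conversion recipe. A readable sketch using the real T5 names from transformers; it assumes TensorFlow is installed for the weight-loading step:

```python
from transformers import T5Config, T5ForConditionalGeneration, load_tf_weights_in_t5

def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
    # Build an untrained PyTorch model from the JSON config
    config = T5Config.from_json_file(config_file)
    model = T5ForConditionalGeneration(config)
    # Copy the TensorFlow variables into the PyTorch parameters
    load_tf_weights_in_t5(model, config, tf_checkpoint_path)
    # Persist in the standard Hugging Face format
    model.save_pretrained(pytorch_dump_path)
```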
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_SCREAMING_SNAKE_CASE = {
"""configuration_git""": ["""GIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GitConfig""", """GitVisionConfig"""],
"""processing_git""": ["""GitProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = [
"""GIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GitForCausalLM""",
"""GitModel""",
"""GitPreTrainedModel""",
"""GitVisionModel""",
]
if TYPE_CHECKING:
from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
from .processing_git import GitProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_git import (
GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GitForCausalLM,
GitModel,
GitPreTrainedModel,
GitVisionModel,
)
else:
import sys
_SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
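The module above illustrates transformers' lazy-import layout: `_import_structure` maps submodules to exported names, the `TYPE_CHECKING` branch gives static type checkers real imports, and at runtime `sys.modules[__name__]` is swapped for a `_LazyModule` proxy so heavy dependencies load only on first use. A generic sketch of that mechanism with hypothetical submodule names, using only the standard library:

```python
import importlib
import types

class LazyModule(types.ModuleType):
    """Module proxy that imports a submodule only on first attribute access."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        # Reverse map: exported name -> submodule that defines it
        self._name_to_module = {
            export: submodule
            for submodule, exports in import_structure.items()
            for export in exports
        }

    def __getattr__(self, attr):
        if attr not in self._name_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        module = importlib.import_module(f"{self.__name__}.{self._name_to_module[attr]}")
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache so __getattr__ is not hit again
        return value

# At the bottom of a package __init__.py one would then write, e.g.:
# sys.modules[__name__] = LazyModule(__name__, {"configuration": ["MyConfig"], "modeling": ["MyModel"]})
```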
| 358 |
import argparse
import requests
import torch
from PIL import Image
from torchvision.transforms import Compose, Normalize, Resize, ToTensor
from transformers import SwinaSRConfig, SwinaSRForImageSuperResolution, SwinaSRImageProcessor
def lowercase( UpperCamelCase_ ) -> Optional[int]:
'''simple docstring'''
UpperCamelCase = SwinaSRConfig()
if "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
UpperCamelCase = 4
elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
UpperCamelCase = 4
UpperCamelCase = 48
UpperCamelCase = """pixelshuffle_aux"""
elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
UpperCamelCase = [6, 6, 6, 6]
UpperCamelCase = 60
UpperCamelCase = [6, 6, 6, 6]
UpperCamelCase = """pixelshuffledirect"""
elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
UpperCamelCase = 4
UpperCamelCase = """nearest+conv"""
elif "Swin2SR_Jpeg_dynamic" in checkpoint_url:
UpperCamelCase = 1
UpperCamelCase = 1
UpperCamelCase = 126
UpperCamelCase = 7
UpperCamelCase = 2_5_5.0
UpperCamelCase = """"""
return config
def lowercase( UpperCamelCase_ , UpperCamelCase_ ) -> Dict:
'''simple docstring'''
if "patch_embed.proj" in name and "layers" not in name:
UpperCamelCase = name.replace("""patch_embed.proj""" , """embeddings.patch_embeddings.projection""" )
if "patch_embed.norm" in name:
UpperCamelCase = name.replace("""patch_embed.norm""" , """embeddings.patch_embeddings.layernorm""" )
if "layers" in name:
UpperCamelCase = name.replace("""layers""" , """encoder.stages""" )
if "residual_group.blocks" in name:
UpperCamelCase = name.replace("""residual_group.blocks""" , """layers""" )
if "attn.proj" in name:
UpperCamelCase = name.replace("""attn.proj""" , """attention.output.dense""" )
if "attn" in name:
UpperCamelCase = name.replace("""attn""" , """attention.self""" )
if "norm1" in name:
UpperCamelCase = name.replace("""norm1""" , """layernorm_before""" )
if "norm2" in name:
UpperCamelCase = name.replace("""norm2""" , """layernorm_after""" )
if "mlp.fc1" in name:
UpperCamelCase = name.replace("""mlp.fc1""" , """intermediate.dense""" )
if "mlp.fc2" in name:
UpperCamelCase = name.replace("""mlp.fc2""" , """output.dense""" )
if "q_bias" in name:
UpperCamelCase = name.replace("""q_bias""" , """query.bias""" )
if "k_bias" in name:
UpperCamelCase = name.replace("""k_bias""" , """key.bias""" )
if "v_bias" in name:
UpperCamelCase = name.replace("""v_bias""" , """value.bias""" )
if "cpb_mlp" in name:
UpperCamelCase = name.replace("""cpb_mlp""" , """continuous_position_bias_mlp""" )
if "patch_embed.proj" in name:
UpperCamelCase = name.replace("""patch_embed.proj""" , """patch_embed.projection""" )
if name == "norm.weight":
UpperCamelCase = """layernorm.weight"""
if name == "norm.bias":
UpperCamelCase = """layernorm.bias"""
if "conv_first" in name:
UpperCamelCase = name.replace("""conv_first""" , """first_convolution""" )
if (
"upsample" in name
or "conv_before_upsample" in name
or "conv_bicubic" in name
or "conv_up" in name
or "conv_hr" in name
or "conv_last" in name
or "aux" in name
):
# heads
if "conv_last" in name:
UpperCamelCase = name.replace("""conv_last""" , """final_convolution""" )
if config.upsampler in ["pixelshuffle", "pixelshuffle_aux", "nearest+conv"]:
if "conv_before_upsample.0" in name:
UpperCamelCase = name.replace("""conv_before_upsample.0""" , """conv_before_upsample""" )
if "upsample.0" in name:
UpperCamelCase = name.replace("""upsample.0""" , """upsample.convolution_0""" )
if "upsample.2" in name:
UpperCamelCase = name.replace("""upsample.2""" , """upsample.convolution_1""" )
UpperCamelCase = """upsample.""" + name
elif config.upsampler == "pixelshuffledirect":
UpperCamelCase = name.replace("""upsample.0.weight""" , """upsample.conv.weight""" )
UpperCamelCase = name.replace("""upsample.0.bias""" , """upsample.conv.bias""" )
else:
pass
else:
UpperCamelCase = """swin2sr.""" + name
return name
def lowercase( UpperCamelCase_ , UpperCamelCase_ ) -> Union[str, Any]:
'''simple docstring'''
for key in orig_state_dict.copy().keys():
UpperCamelCase = orig_state_dict.pop(UpperCamelCase_ )
if "qkv" in key:
UpperCamelCase = key.split(""".""" )
UpperCamelCase = int(key_split[1] )
UpperCamelCase = int(key_split[4] )
UpperCamelCase = config.embed_dim
if "weight" in key:
UpperCamelCase = val[:dim, :]
UpperCamelCase = val[dim : dim * 2, :]
UpperCamelCase = val[-dim:, :]
else:
UpperCamelCase = val[:dim]
UpperCamelCase = val[dim : dim * 2]
UpperCamelCase = val[-dim:]
else:
UpperCamelCase = val
return orig_state_dict
def lowercase( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> Optional[Any]:
'''simple docstring'''
UpperCamelCase = get_config(UpperCamelCase_ )
UpperCamelCase = SwinaSRForImageSuperResolution(UpperCamelCase_ )
model.eval()
UpperCamelCase = torch.hub.load_state_dict_from_url(UpperCamelCase_ , map_location="""cpu""" )
UpperCamelCase = convert_state_dict(UpperCamelCase_ , UpperCamelCase_ )
UpperCamelCase , UpperCamelCase = model.load_state_dict(UpperCamelCase_ , strict=UpperCamelCase_ )
if len(UpperCamelCase_ ) > 0:
raise ValueError("""Missing keys when converting: {}""".format(UpperCamelCase_ ) )
for key in unexpected_keys:
if not ("relative_position_index" in key or "relative_coords_table" in key or "self_mask" in key):
raise ValueError(f"""Unexpected key {key} in state_dict""" )
# verify values
UpperCamelCase = """https://github.com/mv-lab/swin2sr/blob/main/testsets/real-inputs/shanghai.jpg?raw=true"""
UpperCamelCase = Image.open(requests.get(UpperCamelCase_ , stream=UpperCamelCase_ ).raw ).convert("""RGB""" )
UpperCamelCase = SwinaSRImageProcessor()
# pixel_values = processor(image, return_tensors="pt").pixel_values
UpperCamelCase = 126 if """Jpeg""" in checkpoint_url else 256
UpperCamelCase = Compose(
[
Resize((image_size, image_size) ),
ToTensor(),
Normalize(mean=[0.4_8_5, 0.4_5_6, 0.4_0_6] , std=[0.2_2_9, 0.2_2_4, 0.2_2_5] ),
] )
UpperCamelCase = transforms(UpperCamelCase_ ).unsqueeze(0 )
if config.num_channels == 1:
UpperCamelCase = pixel_values[:, 0, :, :].unsqueeze(1 )
UpperCamelCase = model(UpperCamelCase_ )
# assert values
if "Swin2SR_ClassicalSR_X2_64" in checkpoint_url:
UpperCamelCase = torch.Size([1, 3, 512, 512] )
UpperCamelCase = torch.tensor(
[[-0.7_0_8_7, -0.7_1_3_8, -0.6_7_2_1], [-0.8_3_4_0, -0.8_0_9_5, -0.7_2_9_8], [-0.9_1_4_9, -0.8_4_1_4, -0.7_9_4_0]] )
elif "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
UpperCamelCase = torch.Size([1, 3, 1024, 1024] )
UpperCamelCase = torch.tensor(
[[-0.7_7_7_5, -0.8_1_0_5, -0.8_9_3_3], [-0.7_7_6_4, -0.8_3_5_6, -0.9_2_2_5], [-0.7_9_7_6, -0.8_6_8_6, -0.9_5_7_9]] )
elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
# TODO values didn't match exactly here
UpperCamelCase = torch.Size([1, 3, 1024, 1024] )
UpperCamelCase = torch.tensor(
[[-0.8_0_3_5, -0.7_5_0_4, -0.7_4_9_1], [-0.8_5_3_8, -0.8_1_2_4, -0.7_7_8_2], [-0.8_8_0_4, -0.8_6_5_1, -0.8_4_9_3]] )
elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
UpperCamelCase = torch.Size([1, 3, 512, 512] )
UpperCamelCase = torch.tensor(
[[-0.7_6_6_9, -0.8_6_6_2, -0.8_7_6_7], [-0.8_8_1_0, -0.9_9_6_2, -0.9_8_2_0], [-0.9_3_4_0, -1.0_3_2_2, -1.1_1_4_9]] )
elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
UpperCamelCase = torch.Size([1, 3, 1024, 1024] )
UpperCamelCase = torch.tensor(
[[-0.5_2_3_8, -0.5_5_5_7, -0.6_3_2_1], [-0.6_0_1_6, -0.5_9_0_3, -0.6_3_9_1], [-0.6_2_4_4, -0.6_3_3_4, -0.6_8_8_9]] )
assert (
outputs.reconstruction.shape == expected_shape
), f"""Shape of reconstruction should be {expected_shape}, but is {outputs.reconstruction.shape}"""
assert torch.allclose(outputs.reconstruction[0, 0, :3, :3] , UpperCamelCase_ , atol=1E-3 )
print("""Looks ok!""" )
UpperCamelCase = {
"""https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth""": (
"""swin2SR-classical-sr-x2-64"""
),
"""https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X4_64.pth""": (
"""swin2SR-classical-sr-x4-64"""
),
"""https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_CompressedSR_X4_48.pth""": (
"""swin2SR-compressed-sr-x4-48"""
),
"""https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_Lightweight_X2_64.pth""": (
"""swin2SR-lightweight-x2-64"""
),
"""https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR.pth""": (
"""swin2SR-realworld-sr-x4-64-bsrgan-psnr"""
),
}
UpperCamelCase = url_to_name[checkpoint_url]
if pytorch_dump_folder_path is not None:
print(f"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(UpperCamelCase_ )
print(f"""Saving image processor to {pytorch_dump_folder_path}""" )
processor.save_pretrained(UpperCamelCase_ )
if push_to_hub:
model.push_to_hub(f"""caidas/{model_name}""" )
processor.push_to_hub(f"""caidas/{model_name}""" )
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth""",
type=str,
help="""URL of the original Swin2SR checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Whether to push the converted model to the hub.""")
_SCREAMING_SNAKE_CASE = parser.parse_args()
convert_swinasr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 165 | 0 |
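The trickiest part of the conversion above is `convert_state_dict`, which splits the original checkpoint's fused `qkv` projection into the separate query/key/value tensors that the Hugging Face attention module expects (the `val[:dim]`, `val[dim : dim * 2]`, `val[-dim:]` slices). A standalone illustration of that slicing, with a made-up embedding size:

```python
import torch

dim = 60  # attention embedding dim; 60 matches the Swin2SR lightweight config above
fused_weight = torch.randn(3 * dim, dim)  # rows are [query; key; value] stacked
fused_bias = torch.randn(3 * dim)

query_w = fused_weight[:dim, :]
key_w = fused_weight[dim : dim * 2, :]
value_w = fused_weight[-dim:, :]
query_b, key_b, value_b = fused_bias[:dim], fused_bias[dim : dim * 2], fused_bias[-dim:]

assert query_w.shape == key_w.shape == value_w.shape == (dim, dim)
assert query_b.shape == (dim,)
```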
'''simple docstring'''
import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen, xsplitext
from ..table import array_cast
from ..utils.py_utils import no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
from .features import FeatureType
A__ , A__ , A__ = False, False, False
@dataclass
class UpperCAmelCase :
_lowercase: Optional[int] = None
_lowercase: bool = True
_lowercase: bool = True
_lowercase: Optional[str] = None
# Automatically constructed
_lowercase: ClassVar[str] = "dict"
_lowercase: ClassVar[Any] = pa.struct({'''bytes''': pa.binary(), '''path''': pa.string()} )
_lowercase: str = field(default='''Audio''' , init=snake_case_ , repr=snake_case_ )
def __call__( self : int ) -> int:
return self.pa_type
def lowercase__ ( self : List[Any] , __snake_case : Union[str, bytes, dict] ) -> dict:
try:
import soundfile as sf # soundfile is a dependency of librosa, needed to decode audio files.
except ImportError as err:
raise ImportError("""To support encoding audio data, please install 'soundfile'.""" ) from err
if isinstance(__snake_case , __snake_case ):
return {"bytes": None, "path": value}
elif isinstance(__snake_case , __snake_case ):
return {"bytes": value, "path": None}
elif "array" in value:
# convert the audio array to wav bytes
_lowerCAmelCase = BytesIO()
sf.write(__snake_case , value["""array"""] , value["""sampling_rate"""] , format="""wav""" )
return {"bytes": buffer.getvalue(), "path": None}
elif value.get("""path""" ) is not None and os.path.isfile(value["""path"""] ):
# we set "bytes": None to not duplicate the data if they're already available locally
if value["path"].endswith("""pcm""" ):
# "PCM" only has raw audio bytes
if value.get("""sampling_rate""" ) is None:
# At least, If you want to convert "PCM-byte" to "WAV-byte", you have to know sampling rate
raise KeyError("""To use PCM files, please specify a 'sampling_rate' in Audio object""" )
if value.get("""bytes""" ):
# If we already had PCM-byte, we don`t have to make "read file, make bytes" (just use it!)
_lowerCAmelCase = np.frombuffer(value["""bytes"""] , dtype=np.int16 ).astype(np.float32 ) / 32767
else:
_lowerCAmelCase = np.memmap(value["""path"""] , dtype="""h""" , mode="""r""" ).astype(np.float32 ) / 32767
_lowerCAmelCase = BytesIO(bytes() )
sf.write(__snake_case , __snake_case , value["""sampling_rate"""] , format="""wav""" )
return {"bytes": buffer.getvalue(), "path": None}
else:
return {"bytes": None, "path": value.get("""path""" )}
elif value.get("""bytes""" ) is not None or value.get("""path""" ) is not None:
# store the audio bytes, and path is used to infer the audio format using the file extension
return {"bytes": value.get("""bytes""" ), "path": value.get("""path""" )}
else:
raise ValueError(
f"An audio sample should have one of 'path' or 'bytes' but they are missing or None in {value}." )
def lowercase__ ( self : List[Any] , __snake_case : dict , __snake_case : Optional[Dict[str, Union[str, bool, None]]] = None ) -> dict:
if not self.decode:
raise RuntimeError("""Decoding is disabled for this feature. Please use Audio(decode=True) instead.""" )
_lowerCAmelCase , _lowerCAmelCase = (value["""path"""], BytesIO(value["""bytes"""] )) if value["""bytes"""] is not None else (value["""path"""], None)
if path is None and file is None:
raise ValueError(f"An audio sample should have one of 'path' or 'bytes' but both are None in {value}." )
try:
import librosa
import soundfile as sf
except ImportError as err:
raise ImportError("""To support decoding audio files, please install 'librosa' and 'soundfile'.""" ) from err
_lowerCAmelCase = xsplitext(__snake_case )[1][1:].lower() if path is not None else None
if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
raise RuntimeError(
"""Decoding 'opus' files requires system library 'libsndfile'>=1.0.31, """
"""You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. """ )
elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
raise RuntimeError(
"""Decoding 'mp3' files requires system library 'libsndfile'>=1.1.0, """
"""You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. """ )
if file is None:
_lowerCAmelCase = token_per_repo_id or {}
_lowerCAmelCase = path.split("""::""" )[-1]
try:
_lowerCAmelCase = string_to_dict(__snake_case , config.HUB_DATASETS_URL )["""repo_id"""]
_lowerCAmelCase = token_per_repo_id[repo_id]
except (ValueError, KeyError):
_lowerCAmelCase = None
with xopen(__snake_case , """rb""" , use_auth_token=__snake_case ) as f:
_lowerCAmelCase , _lowerCAmelCase = sf.read(__snake_case )
else:
_lowerCAmelCase , _lowerCAmelCase = sf.read(__snake_case )
_lowerCAmelCase = array.T
if self.mono:
_lowerCAmelCase = librosa.to_mono(__snake_case )
if self.sampling_rate and self.sampling_rate != sampling_rate:
_lowerCAmelCase = librosa.resample(__snake_case , orig_sr=__snake_case , target_sr=self.sampling_rate )
_lowerCAmelCase = self.sampling_rate
return {"path": path, "array": array, "sampling_rate": sampling_rate}
def lowercase__ ( self : List[str] ) -> Union["FeatureType", Dict[str, "FeatureType"]]:
from .features import Value
if self.decode:
raise ValueError("""Cannot flatten a decoded Audio feature.""" )
return {
"bytes": Value("""binary""" ),
"path": Value("""string""" ),
}
def lowercase__ ( self : int , __snake_case : Union[pa.StringArray, pa.StructArray] ) -> pa.StructArray:
if pa.types.is_string(storage.type ):
_lowerCAmelCase = pa.array([None] * len(__snake_case ) , type=pa.binary() )
_lowerCAmelCase = pa.StructArray.from_arrays([bytes_array, storage] , ["""bytes""", """path"""] , mask=storage.is_null() )
elif pa.types.is_binary(storage.type ):
_lowerCAmelCase = pa.array([None] * len(__snake_case ) , type=pa.string() )
_lowerCAmelCase = pa.StructArray.from_arrays([storage, path_array] , ["""bytes""", """path"""] , mask=storage.is_null() )
elif pa.types.is_struct(storage.type ) and storage.type.get_all_field_indices("""array""" ):
_lowerCAmelCase = pa.array([Audio().encode_example(__snake_case ) if x is not None else None for x in storage.to_pylist()] )
elif pa.types.is_struct(storage.type ):
if storage.type.get_field_index("""bytes""" ) >= 0:
_lowerCAmelCase = storage.field("""bytes""" )
else:
_lowerCAmelCase = pa.array([None] * len(__snake_case ) , type=pa.binary() )
if storage.type.get_field_index("""path""" ) >= 0:
_lowerCAmelCase = storage.field("""path""" )
else:
_lowerCAmelCase = pa.array([None] * len(__snake_case ) , type=pa.string() )
_lowerCAmelCase = pa.StructArray.from_arrays([bytes_array, path_array] , ["""bytes""", """path"""] , mask=storage.is_null() )
return array_cast(__snake_case , self.pa_type )
def lowercase__ ( self : Any , __snake_case : pa.StructArray ) -> pa.StructArray:
@no_op_if_value_is_null
def path_to_bytes(__snake_case : List[Any] ):
with xopen(__snake_case , """rb""" ) as f:
_lowerCAmelCase = f.read()
return bytes_
_lowerCAmelCase = pa.array(
[
(path_to_bytes(x["""path"""] ) if x["""bytes"""] is None else x["""bytes"""]) if x is not None else None
for x in storage.to_pylist()
] , type=pa.binary() , )
_lowerCAmelCase = pa.array(
[os.path.basename(__snake_case ) if path is not None else None for path in storage.field("""path""" ).to_pylist()] , type=pa.string() , )
_lowerCAmelCase = pa.StructArray.from_arrays([bytes_array, path_array] , ["""bytes""", """path"""] , mask=bytes_array.is_null() )
return array_cast(__snake_case , self.pa_type )
| 70 |
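In normal use the Audio feature above is attached with `cast_column`, after which the decode path runs lazily on access. A minimal usage sketch (assumes a local `sample.wav` and the soundfile/librosa dependencies the class checks for):

```python
from datasets import Audio, Dataset

ds = Dataset.from_dict({"audio": ["sample.wav"]})
ds = ds.cast_column("audio", Audio(sampling_rate=16_000))  # resampled on decode

example = ds[0]["audio"]         # triggers the decode path shown above
print(example["sampling_rate"])  # 16000
print(example["array"].shape)    # 1-D float array (mono collapses channels)
```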
"""simple docstring"""
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class _UpperCAmelCase ( unittest.TestCase ):
def a ( self : Dict , _lowercase : Union[str, Any] ):
for model_result in results.values():
for batch_size, sequence_length in zip(model_result['''bs'''] , model_result['''ss'''] ):
__UpperCAmelCase = model_result['''result'''][batch_size][sequence_length]
self.assertIsNotNone(_lowercase )
def a ( self : str ):
__UpperCAmelCase = '''sshleifer/tiny-gpt2'''
__UpperCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_lowercase , inference=_lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowercase , )
__UpperCAmelCase = PyTorchBenchmark(_lowercase )
__UpperCAmelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def a ( self : List[str] ):
__UpperCAmelCase = '''sgugger/tiny-distilbert-classification'''
__UpperCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_lowercase , inference=_lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowercase , only_pretrain_model=_lowercase , )
__UpperCAmelCase = PyTorchBenchmark(_lowercase )
__UpperCAmelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def a ( self : str ):
__UpperCAmelCase = '''sshleifer/tiny-gpt2'''
__UpperCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_lowercase , inference=_lowercase , torchscript=_lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowercase , )
__UpperCAmelCase = PyTorchBenchmark(_lowercase )
__UpperCAmelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
@unittest.skipIf(torch_device == '''cpu''' , '''Cant do half precision''' )
def a ( self : Optional[Any] ):
__UpperCAmelCase = '''sshleifer/tiny-gpt2'''
__UpperCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_lowercase , inference=_lowercase , fpaa=_lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowercase , )
__UpperCAmelCase = PyTorchBenchmark(_lowercase )
__UpperCAmelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def a ( self : int ):
__UpperCAmelCase = '''sshleifer/tiny-gpt2'''
__UpperCAmelCase = AutoConfig.from_pretrained(_lowercase )
# set architectures equal to `None`
__UpperCAmelCase = None
__UpperCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_lowercase , inference=_lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowercase , )
__UpperCAmelCase = PyTorchBenchmark(_lowercase , configs=[config] )
__UpperCAmelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def a ( self : Tuple ):
__UpperCAmelCase = '''sshleifer/tiny-gpt2'''
__UpperCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_lowercase , inference=_lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowercase , )
__UpperCAmelCase = PyTorchBenchmark(_lowercase )
__UpperCAmelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
@unittest.skipIf(torch_device == '''cpu''' , '''Can\'t do half precision''' )
def a ( self : Optional[Any] ):
__UpperCAmelCase = '''sshleifer/tiny-gpt2'''
__UpperCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_lowercase , inference=_lowercase , sequence_lengths=[8] , batch_sizes=[1] , fpaa=_lowercase , multi_process=_lowercase , )
__UpperCAmelCase = PyTorchBenchmark(_lowercase )
__UpperCAmelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def a ( self : Any ):
__UpperCAmelCase = '''sshleifer/tiny-gpt2'''
__UpperCAmelCase = AutoConfig.from_pretrained(_lowercase )
__UpperCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_lowercase , inference=_lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowercase , )
__UpperCAmelCase = PyTorchBenchmark(_lowercase , configs=[config] )
__UpperCAmelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def a ( self : str ):
__UpperCAmelCase = '''sshleifer/tinier_bart'''
__UpperCAmelCase = AutoConfig.from_pretrained(_lowercase )
__UpperCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_lowercase , inference=_lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowercase , )
__UpperCAmelCase = PyTorchBenchmark(_lowercase , configs=[config] )
__UpperCAmelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def a ( self : Union[str, Any] ):
__UpperCAmelCase = '''sshleifer/tiny-gpt2'''
__UpperCAmelCase = AutoConfig.from_pretrained(_lowercase )
__UpperCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_lowercase , inference=_lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowercase , )
__UpperCAmelCase = PyTorchBenchmark(_lowercase , configs=[config] )
__UpperCAmelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def a ( self : int ):
__UpperCAmelCase = '''sshleifer/tinier_bart'''
__UpperCAmelCase = AutoConfig.from_pretrained(_lowercase )
__UpperCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_lowercase , inference=_lowercase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowercase , )
__UpperCAmelCase = PyTorchBenchmark(_lowercase , configs=[config] )
__UpperCAmelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def a ( self : Optional[Any] ):
__UpperCAmelCase = '''sshleifer/tiny-gpt2'''
with tempfile.TemporaryDirectory() as tmp_dir:
__UpperCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_lowercase , inference=_lowercase , save_to_csv=_lowercase , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(_lowercase , '''inf_time.csv''' ) , train_memory_csv_file=os.path.join(_lowercase , '''train_mem.csv''' ) , inference_memory_csv_file=os.path.join(_lowercase , '''inf_mem.csv''' ) , train_time_csv_file=os.path.join(_lowercase , '''train_time.csv''' ) , env_info_csv_file=os.path.join(_lowercase , '''env.csv''' ) , multi_process=_lowercase , )
__UpperCAmelCase = PyTorchBenchmark(_lowercase )
benchmark.run()
self.assertTrue(Path(os.path.join(_lowercase , '''inf_time.csv''' ) ).exists() )
self.assertTrue(Path(os.path.join(_lowercase , '''train_time.csv''' ) ).exists() )
self.assertTrue(Path(os.path.join(_lowercase , '''inf_mem.csv''' ) ).exists() )
self.assertTrue(Path(os.path.join(_lowercase , '''train_mem.csv''' ) ).exists() )
self.assertTrue(Path(os.path.join(_lowercase , '''env.csv''' ) ).exists() )
def a ( self : List[Any] ):
__UpperCAmelCase = '''sshleifer/tiny-gpt2'''
def _check_summary_is_not_empty(_lowercase : str ):
self.assertTrue(hasattr(_lowercase , '''sequential''' ) )
self.assertTrue(hasattr(_lowercase , '''cumulative''' ) )
self.assertTrue(hasattr(_lowercase , '''current''' ) )
self.assertTrue(hasattr(_lowercase , '''total''' ) )
with tempfile.TemporaryDirectory() as tmp_dir:
__UpperCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_lowercase , inference=_lowercase , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(_lowercase , '''log.txt''' ) , log_print=_lowercase , trace_memory_line_by_line=_lowercase , multi_process=_lowercase , )
__UpperCAmelCase = PyTorchBenchmark(_lowercase )
__UpperCAmelCase = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
_check_summary_is_not_empty(result.train_summary )
self.assertTrue(Path(os.path.join(_lowercase , '''log.txt''' ) ).exists() )
| 332 | 0 |
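Outside the test class, the benchmark utilities are driven the same way: build `PyTorchBenchmarkArguments`, wrap them in `PyTorchBenchmark`, and call `run()`. A minimal sketch (note these benchmark helpers are deprecated in recent transformers releases):

```python
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments

args = PyTorchBenchmarkArguments(
    models=["sshleifer/tiny-gpt2"],
    training=False,
    inference=True,
    sequence_lengths=[8],
    batch_sizes=[1],
    multi_process=False,
)
benchmark = PyTorchBenchmark(args)
results = benchmark.run()
print(results.time_inference_result)
print(results.memory_inference_result)
```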
"""simple docstring"""
from __future__ import annotations
def UpperCAmelCase__ (snake_case__ : int | float | str , snake_case__ : int | float | str ):
"""simple docstring"""
if nth_term == "":
return [""]
_snake_case : Any = int(snake_case__ )
_snake_case : Optional[Any] = int(snake_case__ )
_snake_case : list[str] = []
for temp in range(int(snake_case__ ) ):
series.append(F"1 / {pow(temp + 1 , int(snake_case__ ) )}" if series else """1""" )
return series
if __name__ == "__main__":
import doctest
doctest.testmod()
A_ = int(input('''Enter the last number (nth term) of the P-Series'''))
A_ = int(input('''Enter the power for P-Series'''))
print('''Formula of P-Series => 1+1/2^p+1/3^p ..... 1/n^p''')
print(p_series(nth_term, power))
| 132 |
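Note that the cell above cannot execute: both parameters of the function were collapsed into the single name `snake_case__`, which is a syntax error, and the two `int(...)` conversions then overwrite one variable. A working sketch of the intended P-series generator:

```python
from __future__ import annotations

def p_series(nth_term: int | float | str, power: int | float | str) -> list[str]:
    """Return the first n terms of 1 + 1/2^p + 1/3^p + ... as strings."""
    if nth_term == "":
        return [""]
    nth_term, power = int(nth_term), int(power)
    series: list[str] = []
    for temp in range(nth_term):
        series.append(f"1 / {pow(temp + 1, power)}" if series else "1")
    return series

print(p_series(5, 2))  # ['1', '1 / 4', '1 / 9', '1 / 16', '1 / 25']
```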
"""simple docstring"""
from .data_collator import (
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForSeqaSeq,
DataCollatorForSOP,
DataCollatorForTokenClassification,
DataCollatorForWholeWordMask,
DataCollatorWithPadding,
DefaultDataCollator,
default_data_collator,
)
from .metrics import glue_compute_metrics, xnli_compute_metrics
from .processors import (
DataProcessor,
InputExample,
InputFeatures,
SingleSentenceClassificationProcessor,
SquadExample,
SquadFeatures,
SquadVaProcessor,
glue_convert_examples_to_features,
glue_output_modes,
glue_processors,
glue_tasks_num_labels,
squad_convert_examples_to_features,
xnli_output_modes,
xnli_processors,
xnli_tasks_num_labels,
)
| 132 | 1 |
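Of the re-exports above, the most commonly used entry point is `DataCollatorWithPadding`, which pads a list of tokenized examples to a common length at batch time. A minimal sketch:

```python
from transformers import AutoTokenizer, DataCollatorWithPadding

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
collator = DataCollatorWithPadding(tokenizer=tokenizer, return_tensors="pt")

features = [tokenizer("short text"), tokenizer("a somewhat longer piece of text")]
batch = collator(features)
print(batch["input_ids"].shape)  # (2, length of the longest example)
```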
import warnings
from ...utils import logging
from .image_processing_layoutlmva import LayoutLMvaImageProcessor
UpperCAmelCase__ = logging.get_logger(__name__)
class __lowerCAmelCase ( lowerCAmelCase_ ):
def __init__( self : str , *A : Optional[Any] , **A : List[str]) -> None:
"""simple docstring"""
warnings.warn(
'The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
' Please use LayoutLMv2ImageProcessor instead.' , _snake_case , )
super().__init__(*_snake_case , **_snake_case)
| 339 |
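The class above is a pure deprecation shim: it subclasses the replacement image processor, warns on construction, and forwards everything else unchanged. The general pattern, with hypothetical Old/New names:

```python
import warnings

class NewImageProcessor:
    def __init__(self, size=224):
        self.size = size

class OldFeatureExtractor(NewImageProcessor):
    """Deprecated alias kept around for one release cycle."""

    def __init__(self, *args, **kwargs):
        warnings.warn(
            "OldFeatureExtractor is deprecated; use NewImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)

OldFeatureExtractor()  # emits the FutureWarning, then behaves like NewImageProcessor
```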
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
a_ :Any = "\\n@misc{wu2016googles,\n title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n"
a_ :List[str] = "\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe 'GLEU score'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore's range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n"
a_ :List[str] = "\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n 'google_bleu': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 
'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.4\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class snake_case__ ( datasets.Metric ):
"""simple docstring"""
def lowercase_ ( self : str ) ->MetricInfo:
return datasets.MetricInfo(
description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('string', id='token' ), id='sequence' ),
'references': datasets.Sequence(
datasets.Sequence(datasets.Value('string', id='token' ), id='sequence' ), id='references' ),
} ), )
def lowercase_ ( self : str, _snake_case : List[List[List[str]]], _snake_case : List[List[str]], _snake_case : int = 1, _snake_case : int = 4, ) ->Dict[str, float]:
return {
"google_bleu": gleu_score.corpus_gleu(
list_of_references=_snake_case, hypotheses=_snake_case, min_len=_snake_case, max_len=_snake_case )
}
| 277 | 0 |
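The compute step above delegates entirely to NLTK's `gleu_score.corpus_gleu`, so the metric can be reproduced without datasets at all. A minimal sketch using one sentence pair from the docstring:

```python
from nltk.translate import gleu_score

hypothesis = "he read the book because he was interested in world history".split()
reference = "he was interested in world history because he read the book".split()

score = gleu_score.corpus_gleu(
    list_of_references=[[reference]],  # each hypothesis may have several references
    hypotheses=[hypothesis],
    min_len=1,
    max_len=4,
)
print(round(score, 2))
```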
import argparse
import os
import re
import packaging.version
lowercase_ = "examples/"
lowercase_ = {
"examples": (re.compile(r"^check_min_version\(\"[^\"]+\"\)\s*$", re.MULTILINE), "check_min_version(\"VERSION\")\n"),
"init": (re.compile(r"^__version__\s+=\s+\"([^\"]+)\"\s*$", re.MULTILINE), "__version__ = \"VERSION\"\n"),
"setup": (re.compile(r"^(\s*)version\s*=\s*\"[^\"]+\",", re.MULTILINE), r"\1version=\"VERSION\","),
"doc": (re.compile(r"^(\s*)release\s*=\s*\"[^\"]+\"$", re.MULTILINE), "release = \"VERSION\"\n"),
}
lowercase_ = {
"init": "src/transformers/__init__.py",
"setup": "setup.py",
}
lowercase_ = "README.md"
def __lowerCAmelCase ( _A : Tuple , _A : Tuple , _A : Optional[int] ):
'''simple docstring'''
with open(__SCREAMING_SNAKE_CASE , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
__snake_case : int = f.read()
__snake_case : int = REPLACE_PATTERNS[pattern]
__snake_case : Tuple = replace.replace("""VERSION""" , __SCREAMING_SNAKE_CASE )
__snake_case : Optional[Any] = re_pattern.sub(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
with open(__SCREAMING_SNAKE_CASE , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f:
f.write(__SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( _A : Dict ):
'''simple docstring'''
for folder, directories, fnames in os.walk(__SCREAMING_SNAKE_CASE ):
# Removing some of the folders with non-actively maintained examples from the walk
if "research_projects" in directories:
directories.remove("""research_projects""" )
if "legacy" in directories:
directories.remove("""legacy""" )
for fname in fnames:
if fname.endswith(""".py""" ):
update_version_in_file(os.path.join(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE , pattern="""examples""" )
def __lowerCAmelCase ( _A : Union[str, Any] , _A : List[str]=False ):
'''simple docstring'''
for pattern, fname in REPLACE_FILES.items():
update_version_in_file(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if not patch:
update_version_in_examples(__SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( ):
'''simple docstring'''
__snake_case : Union[str, Any] = """🤗 Transformers currently provides the following architectures"""
__snake_case : Optional[Any] = """1. Want to contribute a new model?"""
with open(__SCREAMING_SNAKE_CASE , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
__snake_case : str = f.readlines()
# Find the start of the list.
__snake_case : Tuple = 0
while not lines[start_index].startswith(_start_prompt ):
start_index += 1
start_index += 1
__snake_case : List[str] = start_index
# Update the lines in the model list.
while not lines[index].startswith(_end_prompt ):
if lines[index].startswith("""1.""" ):
__snake_case : str = lines[index].replace(
"""https://huggingface.co/docs/transformers/main/model_doc""" , """https://huggingface.co/docs/transformers/model_doc""" , )
index += 1
with open(__SCREAMING_SNAKE_CASE , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f:
f.writelines(__SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( ):
'''simple docstring'''
with open(REPLACE_FILES["""init"""] , """r""" ) as f:
__snake_case : Any = f.read()
__snake_case : Tuple = REPLACE_PATTERNS["""init"""][0].search(__SCREAMING_SNAKE_CASE ).groups()[0]
return packaging.version.parse(__SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( _A : List[Any]=False ):
'''simple docstring'''
__snake_case : Optional[Any] = get_version()
if patch and default_version.is_devrelease:
raise ValueError("""Can't create a patch version from the dev branch, checkout a released version!""" )
if default_version.is_devrelease:
__snake_case : str = default_version.base_version
elif patch:
__snake_case : Tuple = F'''{default_version.major}.{default_version.minor}.{default_version.micro + 1}'''
else:
__snake_case : Optional[int] = F'''{default_version.major}.{default_version.minor + 1}.0'''
# Now let's ask nicely if that's the right one.
__snake_case : Optional[int] = input(F'''Which version are you releasing? [{default_version}]''' )
if len(__SCREAMING_SNAKE_CASE ) == 0:
__snake_case : Union[str, Any] = default_version
print(F'''Updating version to {version}.''' )
global_version_update(__SCREAMING_SNAKE_CASE , patch=__SCREAMING_SNAKE_CASE )
if not patch:
print("""Cleaning main README, don't forget to run `make fix-copies`.""" )
clean_main_ref_in_model_list()
def __lowerCAmelCase ( ):
'''simple docstring'''
__snake_case : Union[str, Any] = get_version()
__snake_case : int = F'''{current_version.major}.{current_version.minor + 1}.0.dev0'''
__snake_case : Optional[int] = current_version.base_version
# Check with the user we got that right.
__snake_case : Dict = input(F'''Which version are we developing now? [{dev_version}]''' )
if len(__SCREAMING_SNAKE_CASE ) == 0:
__snake_case : Optional[Any] = dev_version
print(F'''Updating version to {version}.''' )
global_version_update(__SCREAMING_SNAKE_CASE )
print("""Cleaning main README, don't forget to run `make fix-copies`.""" )
clean_main_ref_in_model_list()
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser()
parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
lowercase_ = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print("Nothing to do after a patch :-)")
else:
post_release_work()
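The engine of the release script is the REPLACE_PATTERNS table: each entry pairs a regex that locates the version string in a file with a template whose literal `VERSION` is substituted before `re.sub` rewrites the text. A standalone sketch of that mechanism on an in-memory string:

```python
import re

pattern = re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE)
template = '__version__ = "VERSION"\n'

source = 'name = "pkg"\n__version__ = "4.26.0.dev0"\n'
updated = pattern.sub(template.replace("VERSION", "4.26.0"), source)
print(updated)  # the __version__ line now reads 4.26.0
```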
| 353 |
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
import PIL.Image
from .features import FeatureType
lowercase_ = None
lowercase_ = "<" if sys.byteorder == "little" else ">"
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image
lowercase_ = [
np.dtype("|b1"),
np.dtype("|u1"),
np.dtype("<u2"),
np.dtype(">u2"),
np.dtype("<i2"),
np.dtype(">i2"),
np.dtype("<u4"),
np.dtype(">u4"),
np.dtype("<i4"),
np.dtype(">i4"),
np.dtype("<f4"),
np.dtype(">f4"),
np.dtype("<f8"),
np.dtype(">f8"),
]
@dataclass
class SCREAMING_SNAKE_CASE__ :
A : bool = True
A : Optional[str] = None
# Automatically constructed
A : ClassVar[str] = "PIL.Image.Image"
A : ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()} )
A : str = field(default="Image" , init=__UpperCamelCase , repr=__UpperCamelCase )
def __call__( self : Any ):
return self.pa_type
def snake_case__ ( self : List[Any] , _lowerCAmelCase : Union[str, bytes, dict, np.ndarray, "PIL.Image.Image"] ):
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("""To support encoding images, please install 'Pillow'.""" )
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
__snake_case : str = np.array(_lowerCAmelCase )
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
return {"path": value, "bytes": None}
elif isinstance(_lowerCAmelCase , _lowerCAmelCase ):
return {"path": None, "bytes": value}
elif isinstance(_lowerCAmelCase , np.ndarray ):
# convert the image array to PNG/TIFF bytes
return encode_np_array(_lowerCAmelCase )
elif isinstance(_lowerCAmelCase , PIL.Image.Image ):
# convert the PIL image to bytes (default format is PNG/TIFF)
return encode_pil_image(_lowerCAmelCase )
elif value.get("""path""" ) is not None and os.path.isfile(value["""path"""] ):
# we set "bytes": None to not duplicate the data if they're already available locally
return {"bytes": None, "path": value.get("""path""" )}
elif value.get("""bytes""" ) is not None or value.get("""path""" ) is not None:
# store the image bytes, and path is used to infer the image format using the file extension
return {"bytes": value.get("""bytes""" ), "path": value.get("""path""" )}
else:
raise ValueError(
f'''An image sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.''' )
def snake_case__ ( self : List[str] , _lowerCAmelCase : dict , _lowerCAmelCase : Dict=None ):
if not self.decode:
raise RuntimeError("""Decoding is disabled for this feature. Please use Image(decode=True) instead.""" )
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("""To support decoding images, please install 'Pillow'.""" )
if token_per_repo_id is None:
__snake_case : Tuple = {}
__snake_case , __snake_case : str = value["""path"""], value["""bytes"""]
if bytes_ is None:
if path is None:
raise ValueError(f'''An image should have one of \'path\' or \'bytes\' but both are None in {value}.''' )
else:
if is_local_path(_lowerCAmelCase ):
__snake_case : str = PIL.Image.open(_lowerCAmelCase )
else:
__snake_case : List[str] = path.split("""::""" )[-1]
try:
__snake_case : Dict = string_to_dict(_lowerCAmelCase , config.HUB_DATASETS_URL )["""repo_id"""]
__snake_case : int = token_per_repo_id.get(_lowerCAmelCase )
except ValueError:
__snake_case : List[Any] = None
with xopen(_lowerCAmelCase , """rb""" , use_auth_token=_lowerCAmelCase ) as f:
__snake_case : Union[str, Any] = BytesIO(f.read() )
__snake_case : Dict = PIL.Image.open(bytes_ )
else:
__snake_case : Optional[Any] = PIL.Image.open(BytesIO(bytes_ ) )
image.load() # to avoid "Too many open files" errors
return image
def snake_case__ ( self : Union[str, Any] ):
from .features import Value
return (
self
if self.decode
else {
"bytes": Value("""binary""" ),
"path": Value("""string""" ),
}
)
def snake_case__ ( self : Optional[int] , _lowerCAmelCase : Union[pa.StringArray, pa.StructArray, pa.ListArray] ):
if pa.types.is_string(storage.type ):
__snake_case : Optional[Any] = pa.array([None] * len(_lowerCAmelCase ) , type=pa.binary() )
__snake_case : Any = pa.StructArray.from_arrays([bytes_array, storage] , ["""bytes""", """path"""] , mask=storage.is_null() )
elif pa.types.is_binary(storage.type ):
__snake_case : Optional[Any] = pa.array([None] * len(_lowerCAmelCase ) , type=pa.string() )
__snake_case : List[str] = pa.StructArray.from_arrays([storage, path_array] , ["""bytes""", """path"""] , mask=storage.is_null() )
elif pa.types.is_struct(storage.type ):
if storage.type.get_field_index("""bytes""" ) >= 0:
__snake_case : List[str] = storage.field("""bytes""" )
else:
__snake_case : List[Any] = pa.array([None] * len(_lowerCAmelCase ) , type=pa.binary() )
if storage.type.get_field_index("""path""" ) >= 0:
__snake_case : Optional[int] = storage.field("""path""" )
else:
__snake_case : int = pa.array([None] * len(_lowerCAmelCase ) , type=pa.string() )
__snake_case : Tuple = pa.StructArray.from_arrays([bytes_array, path_array] , ["""bytes""", """path"""] , mask=storage.is_null() )
elif pa.types.is_list(storage.type ):
__snake_case : Optional[Any] = pa.array(
[encode_np_array(np.array(_lowerCAmelCase ) )["""bytes"""] if arr is not None else None for arr in storage.to_pylist()] , type=pa.binary() , )
__snake_case : Optional[int] = pa.array([None] * len(_lowerCAmelCase ) , type=pa.string() )
__snake_case : List[str] = pa.StructArray.from_arrays(
[bytes_array, path_array] , ["""bytes""", """path"""] , mask=bytes_array.is_null() )
return array_cast(_lowerCAmelCase , self.pa_type )
def snake_case__ ( self : Union[str, Any] , _lowerCAmelCase : pa.StructArray ):
@no_op_if_value_is_null
def path_to_bytes(_lowerCAmelCase : Tuple ):
with xopen(_lowerCAmelCase , """rb""" ) as f:
__snake_case : Optional[int] = f.read()
return bytes_
__snake_case : Tuple = pa.array(
[
(path_to_bytes(x["""path"""] ) if x["""bytes"""] is None else x["""bytes"""]) if x is not None else None
for x in storage.to_pylist()
] , type=pa.binary() , )
__snake_case : Optional[Any] = pa.array(
[os.path.basename(_lowerCAmelCase ) if path is not None else None for path in storage.field("""path""" ).to_pylist()] , type=pa.string() , )
__snake_case : Any = pa.StructArray.from_arrays([bytes_array, path_array] , ["""bytes""", """path"""] , mask=bytes_array.is_null() )
return array_cast(_lowerCAmelCase , self.pa_type )
def __lowerCAmelCase ( ):
'''simple docstring'''
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("""To support encoding images, please install 'Pillow'.""" )
global _IMAGE_COMPRESSION_FORMATS
if _IMAGE_COMPRESSION_FORMATS is None:
PIL.Image.init()
__snake_case : Optional[Any] = list(set(PIL.Image.OPEN.keys() ) & set(PIL.Image.SAVE.keys() ) )
return _IMAGE_COMPRESSION_FORMATS
def __lowerCAmelCase ( __SCREAMING_SNAKE_CASE : "PIL.Image.Image" ):
'''simple docstring'''
__snake_case : List[Any] = BytesIO()
if image.format in list_image_compression_formats():
__snake_case : Union[str, Any] = image.format
else:
__snake_case : List[Any] = """PNG""" if image.mode in ["""1""", """L""", """LA""", """RGB""", """RGBA"""] else """TIFF"""
image.save(__SCREAMING_SNAKE_CASE , format=__SCREAMING_SNAKE_CASE )
return buffer.getvalue()
def __lowerCAmelCase ( __SCREAMING_SNAKE_CASE : "PIL.Image.Image" ):
'''simple docstring'''
if hasattr(__SCREAMING_SNAKE_CASE , """filename""" ) and image.filename != "":
return {"path": image.filename, "bytes": None}
else:
return {"path": None, "bytes": image_to_bytes(__SCREAMING_SNAKE_CASE )}
def __lowerCAmelCase ( __SCREAMING_SNAKE_CASE : np.ndarray ):
'''simple docstring'''
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("""To support encoding images, please install 'Pillow'.""" )
__snake_case : List[Any] = array.dtype
__snake_case : List[Any] = dtype.byteorder if dtype.byteorder != """=""" else _NATIVE_BYTEORDER
__snake_case : Dict = dtype.kind
__snake_case : Union[str, Any] = dtype.itemsize
__snake_case : Tuple = None
# Multi-channel array case (only np.dtype("|u1") is allowed)
if array.shape[2:]:
__snake_case : int = np.dtype("""|u1""" )
if dtype_kind not in ["u", "i"]:
raise TypeError(
F'''Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays.''' )
if dtype is not dest_dtype:
warnings.warn(F'''Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'''' )
# Exact match
elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
__snake_case : List[str] = dtype
else: # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
while dtype_itemsize >= 1:
__snake_case : int = dtype_byteorder + dtype_kind + str(__SCREAMING_SNAKE_CASE )
__snake_case : Any = np.dtype(__SCREAMING_SNAKE_CASE )
if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES:
warnings.warn(F'''Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'''' )
break
else:
dtype_itemsize //= 2
if dest_dtype is None:
raise TypeError(
F'''Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}''' )
__snake_case : Optional[int] = PIL.Image.fromarray(array.astype(__SCREAMING_SNAKE_CASE ) )
return {"path": None, "bytes": image_to_bytes(__SCREAMING_SNAKE_CASE )}
def __lowerCAmelCase ( __SCREAMING_SNAKE_CASE : Union[List[str], List[dict], List[np.ndarray], List["PIL.Image.Image"]] ):
'''simple docstring'''
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("""To support encoding images, please install 'Pillow'.""" )
if objs:
__snake_case , __snake_case : Any = first_non_null_value(__SCREAMING_SNAKE_CASE )
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
if isinstance(__SCREAMING_SNAKE_CASE , np.ndarray ):
__snake_case : int = no_op_if_value_is_null(__SCREAMING_SNAKE_CASE )
return [obj_to_image_dict_func(__SCREAMING_SNAKE_CASE ) for obj in objs]
elif isinstance(__SCREAMING_SNAKE_CASE , PIL.Image.Image ):
__snake_case : List[str] = no_op_if_value_is_null(__SCREAMING_SNAKE_CASE )
return [obj_to_image_dict_func(__SCREAMING_SNAKE_CASE ) for obj in objs]
else:
return objs
else:
return objs
| 20 | 0 |
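As with Audio, the Image feature is normally attached via `cast_column`: strings are treated as paths, numpy arrays are encoded to PNG/TIFF bytes by `encode_np_array`, and decoding yields a `PIL.Image.Image`. A minimal usage sketch (assumes Pillow is installed and a local `cat.png` exists):

```python
import numpy as np
from datasets import Dataset, Image

ds = Dataset.from_dict({"image": ["cat.png"]}).cast_column("image", Image())
pil_image = ds[0]["image"]  # decoded lazily into a PIL.Image.Image

# Arrays work too: an 8x8 RGB uint8 array is stored as compressed PNG bytes
arr = np.zeros((8, 8, 3), dtype=np.uint8)
ds2 = Dataset.from_dict({"image": [arr]}).cast_column("image", Image())
print(ds2[0]["image"].size)  # (8, 8)
```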
"""simple docstring"""
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class A_ (lowercase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Union[str, Any] = (PNDMScheduler,)
SCREAMING_SNAKE_CASE__ : Tuple = (("""num_inference_steps""", 50),)
def UpperCamelCase__ ( self , **lowercase_ ):
"""simple docstring"""
UpperCAmelCase_ : str = {
"num_train_timesteps": 1000,
"beta_start": 0.00_01,
"beta_end": 0.02,
"beta_schedule": "linear",
}
config.update(**lowercase_ )
return config
def UpperCamelCase__ ( self , lowercase_=0 , **lowercase_ ):
"""simple docstring"""
UpperCAmelCase_ : List[Any] = dict(self.forward_default_kwargs )
UpperCAmelCase_ : str = kwargs.pop("num_inference_steps" , lowercase_ )
UpperCAmelCase_ : Optional[Any] = self.dummy_sample
UpperCAmelCase_ : Dict = 0.1 * sample
UpperCAmelCase_ : Optional[Any] = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
UpperCAmelCase_ : List[Any] = self.get_scheduler_config(**lowercase_ )
UpperCAmelCase_ : Union[str, Any] = scheduler_class(**lowercase_ )
scheduler.set_timesteps(lowercase_ )
# copy over dummy past residuals
UpperCAmelCase_ : List[Any] = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowercase_ )
UpperCAmelCase_ : List[str] = scheduler_class.from_pretrained(lowercase_ )
new_scheduler.set_timesteps(lowercase_ )
# copy over dummy past residuals
UpperCAmelCase_ : Union[str, Any] = dummy_past_residuals[:]
UpperCAmelCase_ : Optional[Any] = scheduler.step_prk(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample
UpperCAmelCase_ : Dict = new_scheduler.step_prk(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
UpperCAmelCase_ : List[str] = scheduler.step_plms(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample
UpperCAmelCase_ : Dict = new_scheduler.step_plms(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def UpperCamelCase__ ( self ):
"""simple docstring"""
pass
def UpperCamelCase__ ( self , lowercase_=0 , **lowercase_ ):
"""simple docstring"""
UpperCAmelCase_ : str = dict(self.forward_default_kwargs )
UpperCAmelCase_ : Optional[Any] = kwargs.pop("num_inference_steps" , lowercase_ )
UpperCAmelCase_ : Any = self.dummy_sample
UpperCAmelCase_ : Union[str, Any] = 0.1 * sample
UpperCAmelCase_ : Any = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
UpperCAmelCase_ : Union[str, Any] = self.get_scheduler_config()
UpperCAmelCase_ : List[str] = scheduler_class(**lowercase_ )
scheduler.set_timesteps(lowercase_ )
# copy over dummy past residuals (must be after setting timesteps)
UpperCAmelCase_ : Optional[Any] = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowercase_ )
UpperCAmelCase_ : List[Any] = scheduler_class.from_pretrained(lowercase_ )
# copy over dummy past residuals
new_scheduler.set_timesteps(lowercase_ )
# copy over dummy past residual (must be after setting timesteps)
UpperCAmelCase_ : List[str] = dummy_past_residuals[:]
UpperCAmelCase_ : Union[str, Any] = scheduler.step_prk(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample
UpperCAmelCase_ : List[str] = new_scheduler.step_prk(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
UpperCAmelCase_ : List[Any] = scheduler.step_plms(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample
UpperCAmelCase_ : str = new_scheduler.step_plms(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def UpperCamelCase__ ( self , **lowercase_ ):
"""simple docstring"""
UpperCAmelCase_ : Tuple = self.scheduler_classes[0]
UpperCAmelCase_ : List[str] = self.get_scheduler_config(**lowercase_ )
UpperCAmelCase_ : int = scheduler_class(**lowercase_ )
UpperCAmelCase_ : Any = 10
UpperCAmelCase_ : Tuple = self.dummy_model()
UpperCAmelCase_ : str = self.dummy_sample_deter
scheduler.set_timesteps(lowercase_ )
for i, t in enumerate(scheduler.prk_timesteps ):
UpperCAmelCase_ : List[str] = model(lowercase_ , lowercase_ )
UpperCAmelCase_ : Optional[int] = scheduler.step_prk(lowercase_ , lowercase_ , lowercase_ ).prev_sample
for i, t in enumerate(scheduler.plms_timesteps ):
UpperCAmelCase_ : str = model(lowercase_ , lowercase_ )
UpperCAmelCase_ : Dict = scheduler.step_plms(lowercase_ , lowercase_ , lowercase_ ).prev_sample
return sample
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = dict(self.forward_default_kwargs )
UpperCAmelCase_ : Union[str, Any] = kwargs.pop("num_inference_steps" , lowercase_ )
for scheduler_class in self.scheduler_classes:
UpperCAmelCase_ : int = self.get_scheduler_config()
UpperCAmelCase_ : List[Any] = scheduler_class(**lowercase_ )
UpperCAmelCase_ : List[str] = self.dummy_sample
UpperCAmelCase_ : List[Any] = 0.1 * sample
if num_inference_steps is not None and hasattr(lowercase_ , "set_timesteps" ):
scheduler.set_timesteps(lowercase_ )
elif num_inference_steps is not None and not hasattr(lowercase_ , "set_timesteps" ):
UpperCAmelCase_ : Tuple = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
UpperCAmelCase_ : Any = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
UpperCAmelCase_ : List[Any] = dummy_past_residuals[:]
UpperCAmelCase_ : Dict = scheduler.step_prk(lowercase_ , 0 , lowercase_ , **lowercase_ ).prev_sample
UpperCAmelCase_ : Dict = scheduler.step_prk(lowercase_ , 1 , lowercase_ , **lowercase_ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
UpperCAmelCase_ : Optional[Any] = scheduler.step_plms(lowercase_ , 0 , lowercase_ , **lowercase_ ).prev_sample
UpperCAmelCase_ : Union[str, Any] = scheduler.step_plms(lowercase_ , 1 , lowercase_ , **lowercase_ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def UpperCamelCase__ ( self ):
"""simple docstring"""
for timesteps in [100, 1000]:
self.check_over_configs(num_train_timesteps=lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=lowercase_ )
UpperCAmelCase_ : Optional[int] = self.scheduler_classes[0]
UpperCAmelCase_ : Optional[int] = self.get_scheduler_config(steps_offset=1 )
UpperCAmelCase_ : List[str] = scheduler_class(**lowercase_ )
scheduler.set_timesteps(10 )
assert torch.equal(
scheduler.timesteps , torch.LongTensor(
[901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1] ) , )
def UpperCamelCase__ ( self ):
"""simple docstring"""
for beta_start, beta_end in zip([0.00_01, 0.0_01] , [0.0_02, 0.02] ):
self.check_over_configs(beta_start=lowercase_ , beta_end=lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
for t in [1, 5, 10]:
self.check_over_forward(time_step=lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100] ):
self.check_over_forward(num_inference_steps=lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
# earlier version of set_timesteps() caused an error indexing alpha's with inference steps as power of 3
UpperCAmelCase_ : Tuple = 27
for scheduler_class in self.scheduler_classes:
UpperCAmelCase_ : Optional[Any] = self.dummy_sample
UpperCAmelCase_ : Optional[int] = 0.1 * sample
UpperCAmelCase_ : List[str] = self.get_scheduler_config()
UpperCAmelCase_ : Union[str, Any] = scheduler_class(**lowercase_ )
scheduler.set_timesteps(lowercase_ )
# before power of 3 fix, would error on first step, so we only need to do two
for i, t in enumerate(scheduler.prk_timesteps[:2] ):
UpperCAmelCase_ : List[str] = scheduler.step_prk(lowercase_ , lowercase_ , lowercase_ ).prev_sample
def UpperCamelCase__ ( self ):
"""simple docstring"""
with self.assertRaises(lowercase_ ):
UpperCAmelCase_ : Any = self.scheduler_classes[0]
UpperCAmelCase_ : Dict = self.get_scheduler_config()
UpperCAmelCase_ : List[Any] = scheduler_class(**lowercase_ )
scheduler.step_plms(self.dummy_sample , 1 , self.dummy_sample ).prev_sample
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : List[str] = self.full_loop()
UpperCAmelCase_ : List[str] = torch.sum(torch.abs(lowercase_ ) )
UpperCAmelCase_ : str = torch.mean(torch.abs(lowercase_ ) )
assert abs(result_sum.item() - 1_98.13_18 ) < 1E-2
assert abs(result_mean.item() - 0.25_80 ) < 1E-3
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : str = self.full_loop(prediction_type="v_prediction" )
UpperCAmelCase_ : Any = torch.sum(torch.abs(lowercase_ ) )
UpperCAmelCase_ : Union[str, Any] = torch.mean(torch.abs(lowercase_ ) )
assert abs(result_sum.item() - 67.39_86 ) < 1E-2
assert abs(result_mean.item() - 0.08_78 ) < 1E-3
def UpperCamelCase__ ( self ):
"""simple docstring"""
# We specify different beta, so that the first alpha is 0.99
UpperCAmelCase_ : Dict = self.full_loop(set_alpha_to_one=lowercase_ , beta_start=0.01 )
UpperCAmelCase_ : Union[str, Any] = torch.sum(torch.abs(lowercase_ ) )
UpperCAmelCase_ : int = torch.mean(torch.abs(lowercase_ ) )
assert abs(result_sum.item() - 2_30.03_99 ) < 1E-2
assert abs(result_mean.item() - 0.29_95 ) < 1E-3
def UpperCamelCase__ ( self ):
"""simple docstring"""
# We specify different beta, so that the first alpha is 0.99
UpperCAmelCase_ : Optional[int] = self.full_loop(set_alpha_to_one=lowercase_ , beta_start=0.01 )
UpperCAmelCase_ : Dict = torch.sum(torch.abs(lowercase_ ) )
UpperCAmelCase_ : str = torch.mean(torch.abs(lowercase_ ) )
assert abs(result_sum.item() - 1_86.94_82 ) < 1E-2
assert abs(result_mean.item() - 0.24_34 ) < 1E-3
| 61 |
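For orientation, a minimal sketch of the two-phase denoising loop these tests exercise: PNDM warms up with Runge-Kutta (PRK) steps, then switches to the cheaper linear multistep (PLMS) steps that reuse stored residuals. The lambda stands in for a trained UNet and the tensor shape is arbitrary:

import torch
from diffusers import PNDMScheduler

scheduler = PNDMScheduler(num_train_timesteps=1000, beta_schedule="linear")
scheduler.set_timesteps(10)

sample = torch.randn(1, 3, 8, 8)   # stand-in for a noisy latent
model = lambda x, t: 0.1 * x       # stand-in for a trained UNet

for t in scheduler.prk_timesteps:
    residual = model(sample, t)
    sample = scheduler.step_prk(residual, t, sample).prev_sample
for t in scheduler.plms_timesteps:
    residual = model(sample, t)
    sample = scheduler.step_plms(residual, t, sample).prev_sample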
def least_divisible_repunit(divisor: int) -> int:
    '''Return the length of the smallest repunit divisible by divisor (0 if none exists).'''
    if divisor % 5 == 0 or divisor % 2 == 0:
        return 0
    repunit = 1
    repunit_index = 1
    while repunit:
        repunit = (10 * repunit + 1) % divisor
        repunit_index += 1
    return repunit_index


def solution(limit: int = 1000000) -> int:
    '''Return the smallest n for which the least divisible repunit exceeds limit (Project Euler 129).'''
    divisor = limit - 1
    if divisor % 2 == 0:
        divisor += 1
    while least_divisible_repunit(divisor) <= limit:
        divisor += 2
    return divisor
if __name__ == "__main__":
print(f'''{solution() = }''')
| 283 | 0 |
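Quick sanity checks for the fixed helper above (repunits are R(k) = 1, 11, 111, ...):

assert least_divisible_repunit(7) == 6    # R(6) = 111111 = 7 * 15873
assert least_divisible_repunit(41) == 5   # R(5) = 11111 = 41 * 271
assert least_divisible_repunit(10) == 0   # multiples of 2 or 5 never divide a repunit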
"""simple docstring"""
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
__magic_name__ = logging.get_logger(__name__)
@add_end_docstrings(__a )
class SCREAMING_SNAKE_CASE_ ( __a ):
"""simple docstring"""
def __init__( self , *lowerCAmelCase__ , **lowerCAmelCase__):
super().__init__(*lowerCAmelCase__ , **lowerCAmelCase__)
requires_backends(self , """vision""")
self.check_model_type(
TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == """tf""" else MODEL_FOR_VISION_2_SEQ_MAPPING)
def snake_case_ ( self , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=None):
__SCREAMING_SNAKE_CASE = {}
__SCREAMING_SNAKE_CASE = {}
if prompt is not None:
__SCREAMING_SNAKE_CASE = prompt
if generate_kwargs is not None:
__SCREAMING_SNAKE_CASE = generate_kwargs
if max_new_tokens is not None:
if "generate_kwargs" not in forward_kwargs:
__SCREAMING_SNAKE_CASE = {}
if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
raise ValueError(
"""'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,"""
""" please use only one""")
__SCREAMING_SNAKE_CASE = max_new_tokens
return preprocess_params, forward_kwargs, {}
def __call__( self , lowerCAmelCase__ , **lowerCAmelCase__):
return super().__call__(lowerCAmelCase__ , **lowerCAmelCase__)
def snake_case_ ( self , lowerCAmelCase__ , lowerCAmelCase__=None):
__SCREAMING_SNAKE_CASE = load_image(lowerCAmelCase__)
if prompt is not None:
if not isinstance(lowerCAmelCase__ , lowerCAmelCase__):
raise ValueError(
f"Received an invalid text input, got - {type(lowerCAmelCase__)} - but expected a single string. "
"""Note also that one single text can be provided for conditional image to text generation.""")
__SCREAMING_SNAKE_CASE = self.model.config.model_type
if model_type == "git":
__SCREAMING_SNAKE_CASE = self.image_processor(images=lowerCAmelCase__ , return_tensors=self.framework)
__SCREAMING_SNAKE_CASE = self.tokenizer(text=lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__).input_ids
__SCREAMING_SNAKE_CASE = [self.tokenizer.cls_token_id] + input_ids
__SCREAMING_SNAKE_CASE = torch.tensor(lowerCAmelCase__).unsqueeze(0)
model_inputs.update({"""input_ids""": input_ids})
elif model_type == "pix2struct":
__SCREAMING_SNAKE_CASE = self.image_processor(images=lowerCAmelCase__ , header_text=lowerCAmelCase__ , return_tensors=self.framework)
elif model_type != "vision-encoder-decoder":
# vision-encoder-decoder does not support conditional generation
__SCREAMING_SNAKE_CASE = self.image_processor(images=lowerCAmelCase__ , return_tensors=self.framework)
__SCREAMING_SNAKE_CASE = self.tokenizer(lowerCAmelCase__ , return_tensors=self.framework)
model_inputs.update(lowerCAmelCase__)
else:
raise ValueError(f"Model type {model_type} does not support conditional text generation")
else:
__SCREAMING_SNAKE_CASE = self.image_processor(images=lowerCAmelCase__ , return_tensors=self.framework)
if self.model.config.model_type == "git" and prompt is None:
__SCREAMING_SNAKE_CASE = None
return model_inputs
def snake_case_ ( self , lowerCAmelCase__ , lowerCAmelCase__=None):
# Git model sets `model_inputs["input_ids"] = None` in `preprocess` (when `prompt=None`). In batch model, the
# pipeline will group them into a list of `None`, which fail `_forward`. Avoid this by checking it first.
if (
"input_ids" in model_inputs
and isinstance(model_inputs["""input_ids"""] , lowerCAmelCase__)
and all(x is None for x in model_inputs["""input_ids"""])
):
__SCREAMING_SNAKE_CASE = None
if generate_kwargs is None:
__SCREAMING_SNAKE_CASE = {}
# FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
# parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
# the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
# in the `_prepare_model_inputs` method.
__SCREAMING_SNAKE_CASE = model_inputs.pop(self.model.main_input_name)
__SCREAMING_SNAKE_CASE = self.model.generate(lowerCAmelCase__ , **lowerCAmelCase__ , **lowerCAmelCase__)
return model_outputs
def snake_case_ ( self , lowerCAmelCase__):
__SCREAMING_SNAKE_CASE = []
for output_ids in model_outputs:
__SCREAMING_SNAKE_CASE = {
"""generated_text""": self.tokenizer.decode(
lowerCAmelCase__ , skip_special_tokens=lowerCAmelCase__ , )
}
records.append(lowerCAmelCase__)
return records
| 255 |
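In everyday use the pipeline above is reached through the `pipeline` factory; a minimal sketch (the checkpoint name is one public example, not the only option):

from transformers import pipeline

captioner = pipeline("image-to-text", model="nlpconnect/vit-gpt2-image-captioning")
print(captioner("http://images.cocodataset.org/val2017/000000039769.jpg"))
# e.g. [{'generated_text': 'two cats sleeping on a couch'}]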
"""simple docstring"""
import importlib.metadata
import operator
import re
import sys
from typing import Optional
from packaging import version
__magic_name__ = {
"<": operator.lt,
"<=": operator.le,
"==": operator.eq,
"!=": operator.ne,
">=": operator.ge,
">": operator.gt,
}
def _compare_versions(op, got_ver, want_ver, requirement, pkg, hint):
    if got_ver is None or want_ver is None:
        raise ValueError(
            f"Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider"
            f" reinstalling {pkg}."
        )
    if not ops[op](version.parse(got_ver), version.parse(want_ver)):
        raise ImportError(
            f"{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}"
        )


def require_version(requirement, hint=None):
    hint = f"\n{hint}" if hint is not None else ""

    # non-versioned check
    if re.match(r"^[\w_\-\d]+$", requirement):
        pkg, op, want_ver = requirement, None, None
    else:
        match = re.findall(r"^([^!=<>\s]+)([\s!=<>]{1,2}.+)", requirement)
        if not match:
            raise ValueError(
                "requirement needs to be in the pip package format, e.g., package_a==1.23, or package_b>=1.23, but"
                f" got {requirement}"
            )
        pkg, want_full = match[0]
        want_range = want_full.split(",")  # there could be multiple requirements
        wanted = {}
        for w in want_range:
            match = re.findall(r"^([\s!=<>]{1,2})(.+)", w)
            if not match:
                raise ValueError(
                    "requirement needs to be in the pip package format, e.g., package_a==1.23, or package_b>=1.23,"
                    f" but got {requirement}"
                )
            op, want_ver = match[0]
            wanted[op] = want_ver
            if op not in ops:
                raise ValueError(f"{requirement}: need one of {list(ops.keys())}, but got {op}")

    # special case: the Python interpreter itself
    if pkg == "python":
        got_ver = ".".join([str(x) for x in sys.version_info[:3]])
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)
        return

    # check if any version is installed
    try:
        got_ver = importlib.metadata.version(pkg)
    except importlib.metadata.PackageNotFoundError:
        raise importlib.metadata.PackageNotFoundError(
            f"The '{requirement}' distribution was not found and is required by this application. {hint}"
        )

    # check that the right version is installed if a version number or a range was provided
    if want_ver is not None:
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)


def require_version_core(requirement):
    hint = "Try: pip install transformers -U or pip install -e '.[dev]' if you're working with git main"
    return require_version(requirement, hint)
| 255 | 1 |
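Typical call sites for the helpers above look like this (the pins are illustrative):

require_version("tokenizers>=0.11.1,!=0.11.3,<0.13")                    # multiple constraints
require_version("datasets>=1.8.0", "To fix: pip install -U datasets")   # with a custom hint
require_version("python>=3.8.0")                                        # special-cased interpreter check
require_version_core("sentencepiece")                                   # bare, existence-only check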
'''simple docstring'''
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class A_ ( lowerCAmelCase_ ):
_lowerCamelCase : List[str] = """"""
_lowerCamelCase : str = (
None # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
)
_lowerCamelCase : str = None # compression type in fsspec. ex: "gzip"
_lowerCamelCase : str = None # extension of the filename to strip. ex: "".gz" to get file.txt from file.txt.gz
def __init__( self : Any , snake_case_ : str = "" , snake_case_ : Optional[str] = None , snake_case_ : Optional[dict] = None , **snake_case_ : str ):
super().__init__(self , **snake_case_ )
# always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
_UpperCAmelCase = fsspec.open(
snake_case_ , mode="rb" , protocol=snake_case_ , compression=self.compression , client_kwargs={
"requote_redirect_url": False, # see https://github.com/huggingface/datasets/pull/5459
"trust_env": True, # Enable reading proxy env variables.
**(target_options or {}).pop("client_kwargs" , {} ), # To avoid issues if it was already passed.
} , **(target_options or {}) , )
_UpperCAmelCase = os.path.basename(self.file.path.split("::" )[0] )
_UpperCAmelCase = (
self.compressed_name[: self.compressed_name.rindex("." )]
if "." in self.compressed_name
else self.compressed_name
)
_UpperCAmelCase = None
@classmethod
def lowercase ( cls : int , snake_case_ : str ):
# compressed file paths are always relative to the archive root
return super()._strip_protocol(snake_case_ ).lstrip("/" )
def lowercase ( self : Union[str, Any] ):
if self.dir_cache is None:
_UpperCAmelCase = {**self.file.fs.info(self.file.path ), "name": self.uncompressed_name}
_UpperCAmelCase = {f["name"]: f}
def lowercase ( self : Any , snake_case_ : str ):
return self.file.open().read()
def lowercase ( self : Optional[int] , snake_case_ : str , snake_case_ : str = "rb" , snake_case_ : Tuple=None , snake_case_ : Dict=True , snake_case_ : Optional[int]=None , **snake_case_ : Tuple , ):
_UpperCAmelCase = self._strip_protocol(snake_case_ )
if mode != "rb":
raise ValueError(f'Tried to read with mode {mode} on file {self.file.path} opened with mode \'rb\'' )
return self.file.open()
class A_ ( lowerCAmelCase_ ):
_lowerCamelCase : Tuple = """bz2"""
_lowerCamelCase : Tuple = """bz2"""
_lowerCamelCase : Union[str, Any] = """.bz2"""
class A_ ( lowerCAmelCase_ ):
_lowerCamelCase : Dict = """gzip"""
_lowerCamelCase : Optional[Any] = """gzip"""
_lowerCamelCase : str = """.gz"""
class A_ ( lowerCAmelCase_ ):
_lowerCamelCase : Tuple = """lz4"""
_lowerCamelCase : Any = """lz4"""
_lowerCamelCase : List[Any] = """.lz4"""
class A_ ( lowerCAmelCase_ ):
_lowerCamelCase : Union[str, Any] = """xz"""
_lowerCamelCase : Optional[int] = """xz"""
_lowerCamelCase : Optional[Any] = """.xz"""
class A_ ( lowerCAmelCase_ ):
_lowerCamelCase : Optional[int] = """zstd"""
_lowerCamelCase : Optional[Any] = """zstd"""
_lowerCamelCase : Optional[int] = """.zst"""
def __init__( self : List[str] , snake_case_ : str , snake_case_ : str = "rb" , snake_case_ : Optional[str] = None , snake_case_ : Optional[dict] = None , snake_case_ : int = DEFAULT_BLOCK_SIZE , **snake_case_ : Tuple , ):
super().__init__(
fo=snake_case_ , mode=snake_case_ , target_protocol=snake_case_ , target_options=snake_case_ , block_size=snake_case_ , **snake_case_ , )
# We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
#
# File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
# out.close = close
# AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
#
# see https://github.com/intake/filesystem_spec/issues/725
_UpperCAmelCase = self.file.__enter__
class A_ :
def __init__( self : Optional[Any] , snake_case_ : Optional[Any] ):
_UpperCAmelCase = file_
def __enter__( self : Optional[Any] ):
self._file.__enter__()
return self
def __exit__( self : List[str] , *snake_case_ : Optional[Any] , **snake_case_ : List[Any] ):
self._file.__exit__(*snake_case_ , **snake_case_ )
def __iter__( self : Dict ):
return iter(self._file )
def lowercase ( self : Optional[Any] ):
return next(self._file )
def __getattr__( self : List[Any] , snake_case_ : Any ):
return getattr(self._file , snake_case_ )
def fixed_enter(*snake_case_ : List[str] , **snake_case_ : int ):
return WrappedFile(_enter(*snake_case_ , **snake_case_ ) )
_UpperCAmelCase = fixed_enter
| 22 |
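Once registered with fsspec (datasets does this at import time), these filesystems are addressed through URL chaining, as the `protocol` docstring above hints. A sketch, assuming a gzip archive exists at the given local path:

import fsspec

# fsspec.register_implementation(GzipFileSystem.protocol, GzipFileSystem)  # if not already registered
with fsspec.open("gzip://data.txt::file:///tmp/data.txt.gz", mode="rb") as f:
    print(f.read())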
'''simple docstring'''
def count_set_bits(number: int) -> int:
    '''Count the number of set bits (1s) in a non-negative integer.'''
    if not isinstance(number, int) or number < 0:
        raise ValueError("Input must be a non-negative integer")
    count = 0
    while number:
        # Clearing the lowest set bit (Kernighan's trick) means the loop runs
        # once per set bit instead of once per bit position.
        number &= number - 1
        count += 1
    return count
if __name__ == "__main__":
import doctest
doctest.testmod()
| 22 | 1 |
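A brute-force cross-check of Kernighan's trick against Python's own binary representation:

import random

for _ in range(1_000):
    n = random.getrandbits(32)
    assert count_set_bits(n) == bin(n).count("1")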
import argparse
import os
import torch
from transformers import FlavaConfig, FlavaForPreTraining
from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint
def count_parameters(state_dict):
    # encoder.embeddings are double copied in original FLAVA
    return sum(param.float().sum() if "encoder.embeddings" not in key else 0 for key, param in state_dict.items())


def upgrade_state_dict(state_dict, codebook_state_dict):
    upgrade = {}
    for key, value in state_dict.items():
        if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key:
            continue
        key = key.replace("heads.cmd.mim_head.cls.predictions", "mmm_image_head")
        key = key.replace("heads.cmd.mlm_head.cls.predictions", "mmm_text_head")
        key = key.replace("heads.cmd.itm_head.cls", "itm_head")
        key = key.replace("heads.cmd.itm_head.pooler", "itm_head.pooler")
        key = key.replace("heads.cmd.clip_head.logit_scale", "flava.logit_scale")
        key = key.replace("heads.fairseq_mlm.cls.predictions", "mlm_head")
        key = key.replace("heads.imagenet.mim_head.cls.predictions", "mim_head")
        key = key.replace("mm_text_projection", "flava.text_to_mm_projection")
        key = key.replace("mm_image_projection", "flava.image_to_mm_projection")
        key = key.replace("image_encoder.module", "flava.image_model")
        key = key.replace("text_encoder.module", "flava.text_model")
        key = key.replace("mm_encoder.module.encoder.cls_token", "flava.multimodal_model.cls_token")
        key = key.replace("mm_encoder.module", "flava.multimodal_model")
        key = key.replace("text_projection", "flava.text_projection")
        key = key.replace("image_projection", "flava.image_projection")
        upgrade[key] = value.float()
    for key, value in codebook_state_dict.items():
        upgrade[f"image_codebook.{key}"] = value
    return upgrade


@torch.no_grad()
def convert_flava_checkpoint(checkpoint_path, codebook_path, pytorch_dump_folder_path, config_path=None):
    if config_path is not None:
        config = FlavaConfig.from_pretrained(config_path)
    else:
        config = FlavaConfig()
    hf_model = FlavaForPreTraining(config).eval()
    codebook_state_dict = convert_dalle_checkpoint(codebook_path, None, save_checkpoint=False)
    if os.path.exists(checkpoint_path):
        state_dict = torch.load(checkpoint_path, map_location="cpu")
    else:
        state_dict = torch.hub.load_state_dict_from_url(checkpoint_path, map_location="cpu")
    hf_state_dict = upgrade_state_dict(state_dict, codebook_state_dict)
    hf_model.load_state_dict(hf_state_dict)
    hf_state_dict = hf_model.state_dict()
    hf_count = count_parameters(hf_state_dict)
    state_dict_count = count_parameters(state_dict) + count_parameters(codebook_state_dict)
    assert torch.allclose(hf_count, state_dict_count, atol=1e-3)
    hf_model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to flava checkpoint""")
parser.add_argument("""--codebook_path""", default=None, type=str, help="""Path to flava codebook checkpoint""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
__UpperCAmelCase = parser.parse_args()
convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, args.config_path)
| 139 |
import warnings
from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor
__UpperCAmelCase = logging.get_logger(__name__)
class GLPNFeatureExtractor(GLPNImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use GLPNImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 139 | 1 |
'''simple docstring'''
import re
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class _lowerCAmelCase ( __snake_case ):
'''simple docstring'''
lowerCAmelCase_ = ["image_processor", "tokenizer"]
lowerCAmelCase_ = "AutoImageProcessor"
lowerCAmelCase_ = "AutoTokenizer"
def __init__(self , UpperCAmelCase=None , UpperCAmelCase=None , **UpperCAmelCase ) -> Union[str, Any]:
_snake_case = None
if "feature_extractor" in kwargs:
warnings.warn(
"""The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
""" instead.""" , UpperCAmelCase , )
_snake_case = kwargs.pop("""feature_extractor""" )
_snake_case = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""" )
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""" )
super().__init__(UpperCAmelCase , UpperCAmelCase )
_snake_case = self.image_processor
_snake_case = False
def __call__(self , *UpperCAmelCase , **UpperCAmelCase ) -> Optional[int]:
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*UpperCAmelCase , **UpperCAmelCase )
_snake_case = kwargs.pop("""images""" , UpperCAmelCase )
_snake_case = kwargs.pop("""text""" , UpperCAmelCase )
if len(UpperCAmelCase ) > 0:
_snake_case = args[0]
_snake_case = args[1:]
if images is None and text is None:
raise ValueError("""You need to specify either an `images` or `text` input to process.""" )
if images is not None:
_snake_case = self.image_processor(UpperCAmelCase , *UpperCAmelCase , **UpperCAmelCase )
if text is not None:
_snake_case = self.tokenizer(UpperCAmelCase , **UpperCAmelCase )
if text is None:
return inputs
elif images is None:
return encodings
else:
_snake_case = encodings["""input_ids"""]
return inputs
def lowercase (self , *UpperCAmelCase , **UpperCAmelCase ) -> Any:
return self.tokenizer.batch_decode(*UpperCAmelCase , **UpperCAmelCase )
def lowercase (self , *UpperCAmelCase , **UpperCAmelCase ) -> Optional[Any]:
return self.tokenizer.decode(*UpperCAmelCase , **UpperCAmelCase )
@contextmanager
def lowercase (self ) -> Any:
warnings.warn(
"""`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your """
"""labels by using the argument `text` of the regular `__call__` method (either in the same call as """
"""your images inputs, or in a separate call.""" )
_snake_case = True
_snake_case = self.tokenizer
yield
_snake_case = self.image_processor
_snake_case = False
def lowercase (self , UpperCAmelCase , UpperCAmelCase=False , UpperCAmelCase=None ) -> int:
if added_vocab is None:
_snake_case = self.tokenizer.get_added_vocab()
_snake_case = {}
while tokens:
_snake_case = re.search(R"""<s_(.*?)>""" , UpperCAmelCase , re.IGNORECASE )
if start_token is None:
break
_snake_case = start_token.group(1 )
_snake_case = re.search(Rf"""</s_{key}>""" , UpperCAmelCase , re.IGNORECASE )
_snake_case = start_token.group()
if end_token is None:
_snake_case = tokens.replace(UpperCAmelCase , """""" )
else:
_snake_case = end_token.group()
_snake_case = re.escape(UpperCAmelCase )
_snake_case = re.escape(UpperCAmelCase )
_snake_case = re.search(f"""{start_token_escaped}(.*?){end_token_escaped}""" , UpperCAmelCase , re.IGNORECASE )
if content is not None:
_snake_case = content.group(1 ).strip()
if r"<s_" in content and r"</s_" in content: # non-leaf node
_snake_case = self.tokenajson(UpperCAmelCase , is_inner_value=UpperCAmelCase , added_vocab=UpperCAmelCase )
if value:
if len(UpperCAmelCase ) == 1:
_snake_case = value[0]
_snake_case = value
else: # leaf nodes
_snake_case = []
for leaf in content.split(R"""<sep/>""" ):
_snake_case = leaf.strip()
if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
_snake_case = leaf[1:-2] # for categorical special tokens
output[key].append(UpperCAmelCase )
if len(output[key] ) == 1:
_snake_case = output[key][0]
_snake_case = tokens[tokens.find(UpperCAmelCase ) + len(UpperCAmelCase ) :].strip()
if tokens[:6] == r"<sep/>": # non-leaf nodes
return [output] + self.tokenajson(tokens[6:] , is_inner_value=UpperCAmelCase , added_vocab=UpperCAmelCase )
if len(UpperCAmelCase ):
return [output] if is_inner_value else output
else:
return [] if is_inner_value else {"text_sequence": tokens}
@property
def lowercase (self ) -> Union[str, Any]:
warnings.warn(
"""`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , UpperCAmelCase , )
return self.image_processor_class
@property
def lowercase (self ) -> Optional[Any]:
warnings.warn(
"""`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" , UpperCAmelCase , )
return self.image_processor | 341 |
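The recursive parser above turns Donut-style tag sequences into nested JSON. Upstream this method is `DonutProcessor.token2json` (an inference, since identifiers in this sample are mangled); a usage sketch with a public checkpoint:

from transformers import DonutProcessor

processor = DonutProcessor.from_pretrained("naver-clova-ix/donut-base")
sequence = "<s_menu><s_name>Pizza</s_name><sep/><s_name>Cola</s_name></s_menu>"
print(processor.token2json(sequence))
# {'menu': [{'name': 'Pizza'}, {'name': 'Cola'}]}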
'''simple docstring'''
B64_CHARSET = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"


def base64_encode(data: bytes) -> bytes:
    # Make sure the supplied data is a bytes-like object
    if not isinstance(data, bytes):
        msg = f"a bytes-like object is required, not '{data.__class__.__name__}'"
        raise TypeError(msg)
    binary_stream = "".join(bin(byte)[2:].zfill(8) for byte in data)
    padding_needed = len(binary_stream) % 6 != 0
    if padding_needed:
        # The padding that will be added later
        padding = b"=" * ((6 - len(binary_stream) % 6) // 2)
        # Append binary_stream with arbitrary binary digits (0's by default) to make its
        # length a multiple of 6.
        binary_stream += "0" * (6 - len(binary_stream) % 6)
    else:
        padding = b""
    # Encode every 6 binary digits to their corresponding Base64 character
    return (
        "".join(
            B64_CHARSET[int(binary_stream[index : index + 6], 2)]
            for index in range(0, len(binary_stream), 6)
        ).encode()
        + padding
    )


def base64_decode(encoded_data: str) -> bytes:
    # Make sure encoded_data is either a string or a bytes-like object
    if not isinstance(encoded_data, bytes) and not isinstance(encoded_data, str):
        msg = (
            "argument should be a bytes-like object or ASCII string, "
            f"not '{encoded_data.__class__.__name__}'"
        )
        raise TypeError(msg)
    # In case encoded_data is a bytes-like object, make sure it contains only
    # ASCII characters so we convert it to a string object
    if isinstance(encoded_data, bytes):
        try:
            encoded_data = encoded_data.decode("utf-8")
        except UnicodeDecodeError:
            raise ValueError("base64 encoded data should only contain ASCII characters")
    padding = encoded_data.count("=")
    # Check if the encoded string contains non base64 characters
    if padding:
        assert all(
            char in B64_CHARSET for char in encoded_data[:-padding]
        ), "Invalid base64 character(s) found."
    else:
        assert all(
            char in B64_CHARSET for char in encoded_data
        ), "Invalid base64 character(s) found."
    # Check the padding
    assert len(encoded_data) % 4 == 0 and padding < 3, "Incorrect padding"
    if padding:
        # Remove padding if there is one
        encoded_data = encoded_data[:-padding]
        binary_stream = "".join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data
        )[: -padding * 2]
    else:
        binary_stream = "".join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data
        )
    data = [
        int(binary_stream[index : index + 8], 2)
        for index in range(0, len(binary_stream), 8)
    ]
    return bytes(data)
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 341 | 1 |
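A round trip through the fixed functions above, checked against the standard library:

import base64

payload = b"Hello, World!"
encoded = base64_encode(payload)
print(encoded)  # b'SGVsbG8sIFdvcmxkIQ=='
assert encoded == base64.b64encode(payload)
assert base64_decode(encoded) == payload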
'''simple docstring'''
from transformers import BertTokenizerFast
from .custom_tokenization import CustomTokenizer
class CustomTokenizerFast(BertTokenizerFast):
    slow_tokenizer_class = CustomTokenizer
    pass
| 365 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCamelCase_ = {'''configuration_mbart''': ['''MBART_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MBartConfig''', '''MBartOnnxConfig''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ = ['''MBartTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ = ['''MBartTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ = [
'''MBART_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MBartForCausalLM''',
'''MBartForConditionalGeneration''',
'''MBartForQuestionAnswering''',
'''MBartForSequenceClassification''',
'''MBartModel''',
'''MBartPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ = [
'''TFMBartForConditionalGeneration''',
'''TFMBartModel''',
'''TFMBartPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ = [
'''FlaxMBartForConditionalGeneration''',
'''FlaxMBartForQuestionAnswering''',
'''FlaxMBartForSequenceClassification''',
'''FlaxMBartModel''',
'''FlaxMBartPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mbart import MBART_PRETRAINED_CONFIG_ARCHIVE_MAP, MBartConfig, MBartOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart import MBartTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart_fast import MBartTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mbart import (
MBART_PRETRAINED_MODEL_ARCHIVE_LIST,
MBartForCausalLM,
MBartForConditionalGeneration,
MBartForQuestionAnswering,
MBartForSequenceClassification,
MBartModel,
MBartPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mbart import TFMBartForConditionalGeneration, TFMBartModel, TFMBartPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mbart import (
FlaxMBartForConditionalGeneration,
FlaxMBartForQuestionAnswering,
FlaxMBartForSequenceClassification,
FlaxMBartModel,
FlaxMBartPreTrainedModel,
)
else:
import sys
lowerCamelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 174 | 0 |
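The `_LazyModule` these init files hand their import structure to defers every submodule import until an attribute is first touched. A minimal sketch of the idea (the real transformers implementation also handles `dir()`, pickling, and error messages):

import importlib
import types


class MiniLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # invert {submodule: [names]} into {name: submodule}
        self._name_to_module = {
            attr: submodule for submodule, attrs in import_structure.items() for attr in attrs
        }
        self.__all__ = list(self._name_to_module)

    def __getattr__(self, attr):
        if attr not in self._name_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        submodule = importlib.import_module(f"{self.__name__}.{self._name_to_module[attr]}")
        return getattr(submodule, attr)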
'''simple docstring'''
from __future__ import annotations
from collections.abc import Callable
from typing import Generic, TypeVar
__a = TypeVar("T")
__a = TypeVar("U")
class UpperCAmelCase_ ( Generic[T, U] ):
"""simple docstring"""
def __init__( self : Any , snake_case_ : T | None , snake_case_ : U | None ):
snake_case__ : Dict = key
snake_case__ : Tuple = val
snake_case__ : DoubleLinkedListNode[T, U] | None = None
snake_case__ : DoubleLinkedListNode[T, U] | None = None
def __repr__( self : int ):
return (
f"Node: key: {self.key}, val: {self.val}, "
f"has next: {bool(self.next )}, has prev: {bool(self.prev )}"
)
class UpperCAmelCase_ ( Generic[T, U] ):
"""simple docstring"""
def __init__( self : Tuple ):
snake_case__ : DoubleLinkedListNode[T, U] = DoubleLinkedListNode(snake_case_ , snake_case_ )
snake_case__ : DoubleLinkedListNode[T, U] = DoubleLinkedListNode(snake_case_ , snake_case_ )
snake_case__ , snake_case__ : Tuple = self.rear, self.head
def __repr__( self : List[str] ):
snake_case__ : Dict = ["""DoubleLinkedList"""]
snake_case__ : Any = self.head
while node.next is not None:
rep.append(str(snake_case_ ) )
snake_case__ : List[str] = node.next
rep.append(str(self.rear ) )
return ",\n ".join(snake_case_ )
def lowerCamelCase ( self : Optional[int] , snake_case_ : DoubleLinkedListNode[T, U] ):
snake_case__ : List[str] = self.rear.prev
# All nodes other than self.head are guaranteed to have non-None previous
assert previous is not None
snake_case__ : Tuple = node
snake_case__ : Optional[Any] = previous
snake_case__ : int = node
snake_case__ : List[str] = self.rear
def lowerCamelCase ( self : List[str] , snake_case_ : DoubleLinkedListNode[T, U] ):
if node.prev is None or node.next is None:
return None
snake_case__ : Optional[int] = node.next
snake_case__ : List[str] = node.prev
snake_case__ : Optional[int] = None
snake_case__ : Optional[Any] = None
return node
class UpperCAmelCase_ ( Generic[T, U] ):
"""simple docstring"""
lowercase = {}
def __init__( self : str , snake_case_ : int ):
snake_case__ : DoubleLinkedList[T, U] = DoubleLinkedList()
snake_case__ : Tuple = capacity
snake_case__ : Dict = 0
snake_case__ : Dict = 0
snake_case__ : List[Any] = 0
snake_case__ : dict[T, DoubleLinkedListNode[T, U]] = {}
def __repr__( self : Tuple ):
return (
f"CacheInfo(hits={self.hits}, misses={self.miss}, "
f"capacity={self.capacity}, current size={self.num_keys})"
)
def __contains__( self : Optional[int] , snake_case_ : T ):
return key in self.cache
def lowerCamelCase ( self : Dict , snake_case_ : T ):
# Note: pythonic interface would throw KeyError rather than return None
if key in self.cache:
self.hits += 1
snake_case__ : DoubleLinkedListNode[T, U] = self.cache[key]
snake_case__ : List[str] = self.list.remove(self.cache[key] )
assert node == value_node
# node is guaranteed not None because it is in self.cache
assert node is not None
self.list.add(snake_case_ )
return node.val
self.miss += 1
return None
def lowerCamelCase ( self : List[str] , snake_case_ : T , snake_case_ : U ):
if key not in self.cache:
if self.num_keys >= self.capacity:
# delete first node (oldest) when over capacity
snake_case__ : Tuple = self.list.head.next
# guaranteed to have a non-None first node when num_keys > 0
# explain to type checker via assertions
assert first_node is not None
assert first_node.key is not None
assert (
self.list.remove(snake_case_ ) is not None
                )  # node guaranteed to be in list
del self.cache[first_node.key]
self.num_keys -= 1
snake_case__ : Optional[int] = DoubleLinkedListNode(snake_case_ , snake_case_ )
self.list.add(self.cache[key] )
self.num_keys += 1
else:
# bump node to the end of the list, update value
snake_case__ : List[Any] = self.list.remove(self.cache[key] )
assert node is not None # node guaranteed to be in list
snake_case__ : int = value
self.list.add(snake_case_ )
@classmethod
def lowerCamelCase ( cls : Dict , snake_case_ : int = 128 ):
def cache_decorator_inner(snake_case_ : Callable[[T], U] ) -> Callable[..., U]:
def cache_decorator_wrapper(*snake_case_ : T ) -> U:
if func not in cls.decorator_function_to_instance_map:
snake_case__ : Union[str, Any] = LRUCache(snake_case_ )
snake_case__ : str = cls.decorator_function_to_instance_map[func].get(args[0] )
if result is None:
snake_case__ : Any = func(*snake_case_ )
cls.decorator_function_to_instance_map[func].put(args[0] , snake_case_ )
return result
def cache_info() -> LRUCache[T, U]:
return cls.decorator_function_to_instance_map[func]
setattr(snake_case_ , """cache_info""" , snake_case_ ) # noqa: B010
return cache_decorator_wrapper
return cache_decorator_inner
if __name__ == "__main__":
import doctest
doctest.testmod()
| 35 |
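Upstream (the Python algorithms collection this row appears to derive from) the class above is `LRUCache` and the classmethod at the end is its `decorator`; assuming those names, memoizing a function on its first argument looks like:

@LRUCache.decorator(100)
def fib(n):
    if n in (1, 2):
        return 1
    return fib(n - 1) + fib(n - 2)

print(fib(25))
print(fib.cache_info())  # e.g. CacheInfo(hits=..., misses=..., capacity=100, current size=...)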
import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
def UpperCAmelCase_ ( ):
'''simple docstring'''
raise RuntimeError('''CUDA out of memory.''' )
class UpperCAmelCase__ ( nn.Module ):
"""simple docstring"""
def __init__( self : Any ) -> int:
super().__init__()
SCREAMING_SNAKE_CASE__ = nn.Linear(3 , 4 )
SCREAMING_SNAKE_CASE__ = nn.BatchNormad(4 )
SCREAMING_SNAKE_CASE__ = nn.Linear(4 , 5 )
def lowercase_ ( self : int , __lowerCamelCase : Optional[int] ) -> Tuple:
return self.lineara(self.batchnorm(self.lineara(__lowerCamelCase ) ) )
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def lowercase_ ( self : List[Any] ) -> Dict:
SCREAMING_SNAKE_CASE__ = []
@find_executable_batch_size(starting_batch_size=128 )
def mock_training_loop_function(__lowerCamelCase : Optional[int] ):
nonlocal batch_sizes
batch_sizes.append(__lowerCamelCase )
if batch_size != 8:
raise_fake_out_of_memory()
mock_training_loop_function()
self.assertListEqual(__lowerCamelCase , [128, 64, 32, 16, 8] )
def lowercase_ ( self : Optional[Any] ) -> List[Any]:
SCREAMING_SNAKE_CASE__ = []
@find_executable_batch_size(starting_batch_size=128 )
def mock_training_loop_function(__lowerCamelCase : List[Any] , __lowerCamelCase : Union[str, Any] ):
nonlocal batch_sizes
batch_sizes.append(__lowerCamelCase )
if batch_size != 8:
raise_fake_out_of_memory()
return batch_size, arga
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = mock_training_loop_function('''hello''' )
self.assertListEqual(__lowerCamelCase , [128, 64, 32, 16, 8] )
self.assertListEqual([bs, arga] , [8, '''hello'''] )
def lowercase_ ( self : str ) -> List[Any]:
@find_executable_batch_size(starting_batch_size=0 )
def mock_training_loop_function(__lowerCamelCase : Optional[Any] ):
pass
with self.assertRaises(__lowerCamelCase ) as cm:
mock_training_loop_function()
self.assertIn('''No executable batch size found, reached zero.''' , cm.exception.args[0] )
def lowercase_ ( self : Union[str, Any] ) -> List[str]:
@find_executable_batch_size(starting_batch_size=16 )
def mock_training_loop_function(__lowerCamelCase : Dict ):
if batch_size > 0:
raise_fake_out_of_memory()
pass
with self.assertRaises(__lowerCamelCase ) as cm:
mock_training_loop_function()
self.assertIn('''No executable batch size found, reached zero.''' , cm.exception.args[0] )
def lowercase_ ( self : List[Any] ) -> List[str]:
@find_executable_batch_size(starting_batch_size=128 )
def mock_training_loop_function(__lowerCamelCase : int , __lowerCamelCase : Optional[int] , __lowerCamelCase : Any ):
if batch_size != 8:
                raise_fake_out_of_memory()
with self.assertRaises(__lowerCamelCase ) as cm:
mock_training_loop_function(128 , '''hello''' , '''world''' )
self.assertIn('''Batch size was passed into `f`''' , cm.exception.args[0] )
self.assertIn('''`f(arg1=\'hello\', arg2=\'world\')''' , cm.exception.args[0] )
def lowercase_ ( self : Union[str, Any] ) -> int:
@find_executable_batch_size(starting_batch_size=16 )
def mock_training_loop_function(__lowerCamelCase : Tuple ):
raise ValueError('''Oops, we had an error!''' )
with self.assertRaises(__lowerCamelCase ) as cm:
mock_training_loop_function()
self.assertIn('''Oops, we had an error!''' , cm.exception.args[0] )
@require_cuda
def lowercase_ ( self : Optional[int] ) -> str:
SCREAMING_SNAKE_CASE__ = torch.cuda.memory_allocated()
SCREAMING_SNAKE_CASE__ = ModelForTest()
model.cuda()
self.assertGreater(torch.cuda.memory_allocated() , __lowerCamelCase )
SCREAMING_SNAKE_CASE__ = release_memory(__lowerCamelCase )
self.assertEqual(torch.cuda.memory_allocated() , __lowerCamelCase )
| 314 | 0 |
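In real training code the decorator tested above is used like this (the loop body is a placeholder):

from accelerate.utils.memory import find_executable_batch_size

@find_executable_batch_size(starting_batch_size=64)
def train(batch_size):
    # On a CUDA OOM, the decorator halves batch_size and retries;
    # other exceptions propagate unchanged.
    print(f"trying batch size {batch_size}")

train()  # callers do NOT pass batch_size; the decorator injects it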
import unittest
import numpy as np
def schur_complement(mat_a, mat_b, mat_c, pseudo_inv=None) -> np.ndarray:
    shape_a = np.shape(mat_a)
    shape_b = np.shape(mat_b)
    shape_c = np.shape(mat_c)
    if shape_a[0] != shape_b[0]:
        msg = (
            'Expected the same number of rows for A and B. '
            f'Instead found A of size {shape_a} and B of size {shape_b}'
        )
        raise ValueError(msg)
    if shape_b[1] != shape_c[1]:
        msg = (
            'Expected the same number of columns for B and C. '
            f'Instead found B of size {shape_b} and C of size {shape_c}'
        )
        raise ValueError(msg)
    a_inv = pseudo_inv
    if a_inv is None:
        try:
            a_inv = np.linalg.inv(mat_a)
        except np.linalg.LinAlgError:
            raise ValueError(
                "Input matrix A is not invertible. Cannot compute Schur complement.")
    return mat_c - mat_b.T @ a_inv @ mat_b


class TestSchurComplement(unittest.TestCase):
    def test_schur_complement(self) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1], [6, 3]])
        s = schur_complement(a, b, c)
        input_matrix = np.block([[a, b], [b.T, c]])
        det_x = np.linalg.det(input_matrix)
        det_a = np.linalg.det(a)
        det_s = np.linalg.det(s)
        self.assertAlmostEqual(det_x, det_a * det_s)

    def test_improper_a_b_dimensions(self) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1], [6, 3]])
        # A has 3 rows but B has 2 -> ValueError
        with self.assertRaises(ValueError):
            schur_complement(a, c, b)

    def test_improper_b_c_dimensions(self) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1, 3], [6, 3, 5]])
        # B has 2 columns but C has 3 -> ValueError
        with self.assertRaises(ValueError):
            schur_complement(a, b, c)
if __name__ == "__main__":
import doctest
doctest.testmod()
unittest.main()
| 357 |
import numpy as np
import datasets
_snake_case = '''
Compute the Mahalanobis Distance
Mahalonobis distance is the distance between a point and a distribution.
And not between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance.
It was introduced by Prof. P. C. Mahalanobis in 1936
and has been used in various statistical applications ever since
[source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/]
'''
_snake_case = '''\
@article{de2000mahalanobis,
title={The mahalanobis distance},
author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L},
journal={Chemometrics and intelligent laboratory systems},
volume={50},
number={1},
pages={1--18},
year={2000},
publisher={Elsevier}
}
'''
_snake_case = '''
Args:
X: List of datapoints to be compared with the `reference_distribution`.
reference_distribution: List of datapoints from the reference distribution we want to compare to.
Returns:
mahalanobis: The Mahalonobis distance for each datapoint in `X`.
Examples:
>>> mahalanobis_metric = datasets.load_metric("mahalanobis")
>>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]])
>>> print(results)
{\'mahalanobis\': array([0.5])}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _snake_case ( datasets.Metric ):
def _lowerCamelCase ( self: List[str] ) -> Optional[Any]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"X": datasets.Sequence(datasets.Value("float" , id="sequence" ) , id="X" ),
} ) , )
def _lowerCamelCase ( self: List[str] , __lowerCamelCase: int , __lowerCamelCase: Union[str, Any] ) -> List[str]:
# convert to numpy arrays
__UpperCAmelCase : int = np.array(__lowerCamelCase )
__UpperCAmelCase : Optional[Any] = np.array(__lowerCamelCase )
# Assert that arrays are 2D
if len(X.shape ) != 2:
raise ValueError("Expected `X` to be a 2D vector" )
if len(reference_distribution.shape ) != 2:
raise ValueError("Expected `reference_distribution` to be a 2D vector" )
if reference_distribution.shape[0] < 2:
raise ValueError(
"Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension" )
# Get mahalanobis distance for each prediction
__UpperCAmelCase : str = X - np.mean(__lowerCamelCase )
__UpperCAmelCase : Union[str, Any] = np.cov(reference_distribution.T )
try:
__UpperCAmelCase : int = np.linalg.inv(__lowerCamelCase )
except np.linalg.LinAlgError:
__UpperCAmelCase : Optional[int] = np.linalg.pinv(__lowerCamelCase )
__UpperCAmelCase : Optional[Any] = np.dot(__lowerCamelCase , __lowerCamelCase )
__UpperCAmelCase : Optional[int] = np.dot(__lowerCamelCase , X_minus_mu.T ).diagonal()
return {"mahalanobis": mahal_dist}
| 342 | 0 |
"""simple docstring"""
import argparse
import requests
import torch
from PIL import Image
from transformers import SwinConfig, SwinForMaskedImageModeling, ViTImageProcessor
def _snake_case ( UpperCAmelCase_ : Any ):
A__ = SwinConfig(image_size=192 )
if "base" in model_name:
A__ = 6
A__ = 128
A__ = (2, 2, 18, 2)
A__ = (4, 8, 16, 32)
elif "large" in model_name:
A__ = 12
A__ = 192
A__ = (2, 2, 18, 2)
A__ = (6, 12, 24, 48)
else:
raise ValueError("""Model not supported, only supports base and large variants""" )
A__ = window_size
A__ = embed_dim
A__ = depths
A__ = num_heads
return config
def _snake_case ( UpperCAmelCase_ : Optional[int] ):
if "encoder.mask_token" in name:
A__ = name.replace("""encoder.mask_token""" , """embeddings.mask_token""" )
if "encoder.patch_embed.proj" in name:
A__ = name.replace("""encoder.patch_embed.proj""" , """embeddings.patch_embeddings.projection""" )
if "encoder.patch_embed.norm" in name:
A__ = name.replace("""encoder.patch_embed.norm""" , """embeddings.norm""" )
if "attn.proj" in name:
A__ = name.replace("""attn.proj""" , """attention.output.dense""" )
if "attn" in name:
A__ = name.replace("""attn""" , """attention.self""" )
if "norm1" in name:
A__ = name.replace("""norm1""" , """layernorm_before""" )
if "norm2" in name:
A__ = name.replace("""norm2""" , """layernorm_after""" )
if "mlp.fc1" in name:
A__ = name.replace("""mlp.fc1""" , """intermediate.dense""" )
if "mlp.fc2" in name:
A__ = name.replace("""mlp.fc2""" , """output.dense""" )
if name == "encoder.norm.weight":
A__ = """layernorm.weight"""
if name == "encoder.norm.bias":
A__ = """layernorm.bias"""
if "decoder" in name:
pass
else:
A__ = """swin.""" + name
return name
def _snake_case ( UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Any ):
for key in orig_state_dict.copy().keys():
A__ = orig_state_dict.pop(UpperCAmelCase_ )
if "attn_mask" in key:
pass
elif "qkv" in key:
A__ = key.split(""".""" )
A__ = int(key_split[2] )
A__ = int(key_split[4] )
A__ = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
A__ = val[:dim, :]
A__ = val[
dim : dim * 2, :
]
A__ = val[-dim:, :]
else:
A__ = val[
:dim
]
A__ = val[
dim : dim * 2
]
A__ = val[
-dim:
]
else:
A__ = val
return orig_state_dict
def _snake_case ( UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Any , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Dict ):
A__ = torch.load(UpperCAmelCase_ , map_location="""cpu""" )["""model"""]
A__ = get_swin_config(UpperCAmelCase_ )
A__ = SwinForMaskedImageModeling(UpperCAmelCase_ )
model.eval()
A__ = convert_state_dict(UpperCAmelCase_ , UpperCAmelCase_ )
model.load_state_dict(UpperCAmelCase_ )
A__ = """http://images.cocodataset.org/val2017/000000039769.jpg"""
A__ = ViTImageProcessor(size={"""height""": 192, """width""": 192} )
A__ = Image.open(requests.get(UpperCAmelCase_ , stream=UpperCAmelCase_ ).raw )
A__ = image_processor(images=UpperCAmelCase_ , return_tensors="""pt""" )
with torch.no_grad():
A__ = model(**UpperCAmelCase_ ).logits
print(outputs.keys() )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
print(F"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(UpperCAmelCase_ )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(UpperCAmelCase_ )
if push_to_hub:
print(F"""Pushing model and image processor for {model_name} to hub""" )
model.push_to_hub(F"""microsoft/{model_name}""" )
image_processor.push_to_hub(F"""microsoft/{model_name}""" )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='swin-base-simmim-window6-192',
type=str,
choices=['swin-base-simmim-window6-192', 'swin-large-simmim-window12-192'],
help='Name of the Swin SimMIM model you\'d like to convert.',
)
parser.add_argument(
'--checkpoint_path',
default='/Users/nielsrogge/Documents/SwinSimMIM/simmim_pretrain__swin_base__img192_window6__100ep.pth',
type=str,
help='Path to the original PyTorch checkpoint (.pth file).',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
SCREAMING_SNAKE_CASE_ : int = parser.parse_args()
convert_swin_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
| 335 |
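The least obvious part of `convert_state_dict` above is the qkv split; in isolation (dim here is illustrative, in the script it comes from `attention.self.all_head_size`):

import torch

dim = 128
qkv_weight = torch.randn(3 * dim, dim)  # fused query/key/value projection
q = qkv_weight[:dim, :]
k = qkv_weight[dim : dim * 2, :]
v = qkv_weight[-dim:, :]
assert torch.equal(torch.cat([q, k, v]), qkv_weight)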
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
SCREAMING_SNAKE_CASE_ : int = {
'configuration_megatron_bert': ['MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MegatronBertConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_ : Tuple = [
'MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'MegatronBertForCausalLM',
'MegatronBertForMaskedLM',
'MegatronBertForMultipleChoice',
'MegatronBertForNextSentencePrediction',
'MegatronBertForPreTraining',
'MegatronBertForQuestionAnswering',
'MegatronBertForSequenceClassification',
'MegatronBertForTokenClassification',
'MegatronBertModel',
'MegatronBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_megatron_bert import MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MegatronBertConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_megatron_bert import (
MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
MegatronBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
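# Minimal sketch (not the real `_LazyModule` implementation) of the lazy-import
# pattern used above: entries in `_import_structure` are only imported on first
# attribute access, which keeps `import transformers` cheap. All names below are
# illustrative.
import importlib
import types


class _LazyModuleSketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure
        self._objects = {}

    def __getattr__(self, attr):
        if attr in self._objects:
            return self._objects[attr]
        for submodule, names in self._import_structure.items():
            if attr in names:
                # Import the heavy submodule only now, then cache the symbol.
                module = importlib.import_module(f".{submodule}", self.__name__)
                self._objects[attr] = getattr(module, attr)
                return self._objects[attr]
        raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")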
| 335 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_distilbert""": [
"""DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""DistilBertConfig""",
"""DistilBertOnnxConfig""",
],
"""tokenization_distilbert""": ["""DistilBertTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_distilbert_fast"] = ["DistilBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_distilbert"] = [
"""DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""DistilBertForMaskedLM""",
"""DistilBertForMultipleChoice""",
"""DistilBertForQuestionAnswering""",
"""DistilBertForSequenceClassification""",
"""DistilBertForTokenClassification""",
"""DistilBertModel""",
"""DistilBertPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_distilbert"] = [
"""TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFDistilBertForMaskedLM""",
"""TFDistilBertForMultipleChoice""",
"""TFDistilBertForQuestionAnswering""",
"""TFDistilBertForSequenceClassification""",
"""TFDistilBertForTokenClassification""",
"""TFDistilBertMainLayer""",
"""TFDistilBertModel""",
"""TFDistilBertPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_distilbert"] = [
"""FlaxDistilBertForMaskedLM""",
"""FlaxDistilBertForMultipleChoice""",
"""FlaxDistilBertForQuestionAnswering""",
"""FlaxDistilBertForSequenceClassification""",
"""FlaxDistilBertForTokenClassification""",
"""FlaxDistilBertModel""",
"""FlaxDistilBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_distilbert import (
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DistilBertConfig,
DistilBertOnnxConfig,
)
from .tokenization_distilbert import DistilBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_distilbert_fast import DistilBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_distilbert import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
DistilBertPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertMainLayer,
TFDistilBertModel,
TFDistilBertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
FlaxDistilBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 10 |
import collections
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "prophetnet.tokenizer"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "microsoft/xprophetnet-large-wiki100-cased": (
            "https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/prophetnet.tokenizer"
        ),
    }
}

PRETRAINED_INIT_CONFIGURATION = {
    "microsoft/xprophetnet-large-wiki100-cased": {"do_lower_case": False},
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "microsoft/xprophetnet-large-wiki100-cased": 512,
}
def load_vocab(vocab_file):
    """Loads a vocabulary file into a dictionary."""
    vocab = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens):
        token = token.rstrip("\n")
        vocab[token] = index
    return vocab
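# The helper above assumes a plain-text vocab file with one token per line, e.g.
#
#   [PAD]
#   [CLS]
#   [SEP]
#
# so that load_vocab(path) maps "[PAD]" -> 0, "[CLS]" -> 1, "[SEP]" -> 2.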
class XLMProphetNetTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file,
        bos_token="[SEP]",
        eos_token="[SEP]",
        sep_token="[SEP]",
        unk_token="[UNK]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            unk_token=unk_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        try:
            import sentencepiece as spm
        except ImportError:
            logger.warning(
                "You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece"
                " pip install sentencepiece"
            )
            raise
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'
        # put special tokens and [unused] tokens into the vocab
        self.fairseq_tokens_to_ids = {"[PAD]": 0, "[CLS]": 1, "[SEP]": 2, "[UNK]": 3, "[MASK]": 4}
        for i in range(10):
            tok = f"[unused{i}]"
            self.fairseq_tokens_to_ids[tok] = 5 + i
        # The first "real" token "," has position 15 in the embedding vocab and position 3 in the spm vocab
        self.fairseq_offset = 12
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
        for k in self.fairseq_tokens_to_ids.keys():
            self.unique_no_split_tokens.append(k)
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        try:
            import sentencepiece as spm
        except ImportError:
            logger.warning(
                "You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece"
                " pip install sentencepiece"
            )
            raise
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + [1]
        return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0]
        return len(token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    @property
    def vocab_size(self) -> int:
        return len(self.sp_model) + self.fairseq_offset

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str):
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        if token_ids_1 is None:
            return token_ids_0 + [self.sep_token_id]
        sep = [self.sep_token_id]
        return token_ids_0 + sep + token_ids_1 + sep
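# Usage sketch (requires `sentencepiece` and downloads the vocab from the Hub):
#
#   tokenizer = XLMProphetNetTokenizer.from_pretrained(
#       "microsoft/xprophetnet-large-wiki100-cased"
#   )
#   ids = tokenizer("Hello world").input_ids  # ends with the [SEP] id (2)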
| 10 | 1 |
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from transformers import GradientAccumulator, create_optimizer
@require_tf
class GradientAccumulatorTest(unittest.TestCase):
    def assertListAlmostEqual(self, list1, list2, tol):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol)

    def testGradientAccumulator(self):
        accumulator = GradientAccumulator()
        accumulator([tf.constant([1.0, 2.0])])
        accumulator([tf.constant([-2.0, 1.0])])
        accumulator([tf.constant([-1.0, 2.0])])
        with self.assertRaises(ValueError):
            accumulator([tf.constant([1.0, 1.0]), tf.constant([2.0, 2.0])])
        self.assertEqual(accumulator.step, 3)
        self.assertEqual(len(accumulator.gradients), 1)
        self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist(), [-2.0, 5.0], tol=1e-2)
        accumulator.reset()
        self.assertEqual(accumulator.step, 0)
        self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist(), [0.0, 0.0], tol=1e-2)
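    # Usage sketch of the accumulator pattern exercised above (the training loop,
    # `compute_gradients`, `model`, and `accumulation_steps` are illustrative):
    #
    #   accumulator = GradientAccumulator()
    #   for step, batch in enumerate(dataset):
    #       accumulator(compute_gradients(model, batch))
    #       if (step + 1) % accumulation_steps == 0:
    #           optimizer.apply_gradients(zip(accumulator.gradients, model.trainable_variables))
    #           accumulator.reset()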
    def testGradientAccumulatorDistributionStrategy(self):
        context._context = None
        ops.enable_eager_execution_internal()
        physical_devices = tf.config.list_physical_devices("CPU")
        if len(physical_devices) == 1:
            tf.config.set_logical_device_configuration(
                physical_devices[0], [tf.config.LogicalDeviceConfiguration(), tf.config.LogicalDeviceConfiguration()]
            )
        devices = tf.config.list_logical_devices(device_type="CPU")
        strategy = tf.distribute.MirroredStrategy(devices=devices[:2])
        with strategy.scope():
            accumulator = GradientAccumulator()
            variable = tf.Variable([4.0, 3.0])
            optimizer, _ = create_optimizer(5e-5, 10, 5)
            gradient_placeholder = tf.Variable([0.0, 0.0], trainable=False)

        def accumulate_on_replica(gradient):
            accumulator([gradient])

        def apply_on_replica():
            optimizer.apply_gradients(list(zip(accumulator.gradients, [variable])))

        @tf.function
        def accumulate(grad1, grad2):
            with strategy.scope():
                local_variables = strategy.experimental_local_results(gradient_placeholder)
                local_variables[0].assign(grad1)
                local_variables[1].assign(grad2)
                strategy.run(accumulate_on_replica, args=(gradient_placeholder,))

        @tf.function
        def apply_grad():
            with strategy.scope():
                strategy.run(apply_on_replica)

        def _check_local_values(grad1, grad2):
            values = strategy.experimental_local_results(accumulator._gradients[0])
            self.assertListAlmostEqual(values[0].value(), grad1, tol=1e-2)
            self.assertListAlmostEqual(values[1].value(), grad2, tol=1e-2)
accumulate([1.0, 2.0] , [-1.0, 1.0] )
accumulate([3.0, -1.0] , [-1.0, -1.0] )
accumulate([-2.0, 2.0] , [3.0, -2.0] )
self.assertEqual(accumulator.step , 3 )
_check_local_values([2.0, 3.0] , [1.0, -2.0] )
apply_grad()
self.assertListAlmostEqual(variable.value() , [4.0, 3.0] , tol=1e-2 )
accumulator.reset()
self.assertEqual(accumulator.step , 0 )
_check_local_values([0.0, 0.0] , [0.0, 0.0] ) | 262 |
"""simple docstring"""
from dataclasses import dataclass, field
from typing import Optional
@dataclass
class TrainingArguments:
lowerCamelCase__ : Optional[str] = field(
default='codeparrot/codeparrot' ,metadata={'help': 'Model name or path of model to be trained.'} )
lowerCamelCase__ : Optional[str] = field(
default='./' ,metadata={'help': 'Save dir where model repo is cloned and models updates are saved to.'} )
lowerCamelCase__ : Optional[str] = field(
default='codeparrot/codeparrot-clean-train' ,metadata={'help': 'Name or path of training dataset.'} )
lowerCamelCase__ : Optional[str] = field(
default='codeparrot/codeparrot-clean-valid' ,metadata={'help': 'Name or path of validation dataset.'} )
lowerCamelCase__ : Optional[int] = field(default=2 ,metadata={'help': 'Batch size for training.'} )
lowerCamelCase__ : Optional[int] = field(default=2 ,metadata={'help': 'Batch size for evaluation.'} )
lowerCamelCase__ : Optional[float] = field(default=0.1 ,metadata={'help': 'Value of weight decay.'} )
lowerCamelCase__ : Optional[int] = field(
default=1_0_0_0_0 ,metadata={'help': 'Size of buffer used to shuffle streaming dataset.'} )
    lowerCamelCase__ : Optional[float] = field(default=2E-4 ,metadata={'help': 'Learning rate for training.'} )
    lowerCamelCase__ : Optional[str] = field(default='cosine' ,metadata={'help': 'Learning rate scheduler type.'} )
lowerCamelCase__ : Optional[int] = field(
default=7_5_0 ,metadata={'help': 'Number of warmup steps in the learning rate schedule.'} )
lowerCamelCase__ : Optional[int] = field(
default=1_6 ,metadata={'help': 'Number of gradient accumulation steps.'} )
lowerCamelCase__ : Optional[bool] = field(
default=A__ ,metadata={'help': 'Use gradient checkpointing to reduce memory footprint.'} )
lowerCamelCase__ : Optional[int] = field(default=5_0_0_0_0 ,metadata={'help': 'Maximum number of training steps.'} )
lowerCamelCase__ : Optional[int] = field(
default=-1 ,metadata={'help': 'Maximum number of evaluation steps. If -1 the full dataset is evaluated.'} )
lowerCamelCase__ : Optional[int] = field(default=1_0_2_4 ,metadata={'help': 'Sequence lengths used for training.'} )
lowerCamelCase__ : Optional[int] = field(default=1 ,metadata={'help': 'Training seed.'} )
lowerCamelCase__ : Optional[int] = field(
default=1_0_2_4 ,metadata={'help': 'Interval to save checkpoints. Measured as number of forward passes not training steps.'} ,)
lowerCamelCase__ : Optional[str] = field(
default=A__ ,metadata={'help': 'States path if the training should continue from a checkpoint folder.'} )
lowerCamelCase__ : Optional[bool] = field(default=A__ ,metadata={'help': 'If True the data is pretokenized.'} )
@dataclass
class EvaluationArguments:
lowerCamelCase__ : Optional[str] = field(
default='codeparrot/codeparrot' ,metadata={'help': 'Model name or path of model to be evaluated.'} )
lowerCamelCase__ : Optional[str] = field(
default='codeparrot/codeparrot-clean-valid' ,metadata={'help': 'Name or path of validation dataset.'} )
lowerCamelCase__ : Optional[int] = field(default=2 ,metadata={'help': 'Batch size used for evaluation.'} )
lowerCamelCase__ : Optional[int] = field(
default=-1 ,metadata={'help': 'Maximum number of evaluation steps. If -1 the full dataset is evaluated.'} )
lowerCamelCase__ : Optional[int] = field(default=1_0_2_4 ,metadata={'help': 'Length of sequences to be evaluated.'} )
lowerCamelCase__ : Optional[int] = field(default=1 ,metadata={'help': 'Random seed used for evaluation.'} )
@dataclass
class HumanEvalArguments:
lowerCamelCase__ : Optional[str] = field(
default='codeparrot/codeparrot' ,metadata={'help': 'Model name or path of model to be evaluated.'} )
lowerCamelCase__ : Optional[int] = field(default=A__ ,metadata={'help': 'Number of workers used for code evaluation.'} )
lowerCamelCase__ : Optional[int] = field(
default=A__ ,metadata={'help': 'The number of human-eval tasks to run. If not included all tasks are evaluated.'} ,)
lowerCamelCase__ : Optional[bool] = field(
default=A__ ,metadata={'help': 'Sample from the language model\'s output distribution.'} )
lowerCamelCase__ : Optional[float] = field(default=0.2 ,metadata={'help': 'Sampling temperature used for generation.'} )
lowerCamelCase__ : Optional[int] = field(default=2_5_6 ,metadata={'help': 'Maximum number of newly generated tokens.'} )
lowerCamelCase__ : Optional[int] = field(default=0 ,metadata={'help': 'Top-k parameter used for generation.'} )
lowerCamelCase__ : Optional[float] = field(default=0.9_5 ,metadata={'help': 'Top-p parameter used for nucleus sampling.'} )
lowerCamelCase__ : Optional[int] = field(default=1_0 ,metadata={'help': 'Number of generations to run in parallel.'} )
lowerCamelCase__ : Optional[int] = field(
default=2_0_0 ,metadata={'help': 'Number of completions to generate for each sample.'} )
lowerCamelCase__ : Optional[int] = field(default=1 ,metadata={'help': 'Random seed used for evaluation.'} )
    lowerCamelCase__ : Optional[str] = field(
        default='eval_results.json' ,metadata={'help': 'Name of the file where the evaluation results are saved.'} )
lowerCamelCase__ : Optional[str] = field(
default='0' ,metadata={'help': 'Allow `code_eval` to execute Python code on machine'} )
lowerCamelCase__ : Optional[int] = field(
default=-1 ,metadata={
'help': (
'Determine which device to run the `text-generation` Pipeline on. -1 is CPU and any zero or positive'
' number corresponds to which GPU device id to run on.'
)
} ,)
@dataclass
class PreprocessingArguments:
lowerCamelCase__ : Optional[int] = field(
default=A__ ,metadata={
'help': 'The number of CPU cores to use for parallel preprocessing. Default uses the maximum available.'
} ,)
lowerCamelCase__ : Optional[str] = field(
default='transformersbook/codeparrot' ,metadata={'help': 'Folder or name of dataset to process.'} )
lowerCamelCase__ : Optional[str] = field(
        default='codeparrot-clean' ,metadata={'help': 'Folder to save the processed dataset.'} )
lowerCamelCase__ : Optional[int] = field(
default=1_0_0_0_0_0 ,metadata={'help': 'Number of files to save per JSON output file.'} )
lowerCamelCase__ : Optional[str] = field(default='content' ,metadata={'help': 'Column containing text data to process.'} )
lowerCamelCase__ : Optional[float] = field(
default=1_0_0_0 ,metadata={'help': 'Maximum line length in file, otherwise file is filtered.'} )
lowerCamelCase__ : Optional[float] = field(
default=1_0_0 ,metadata={'help': 'Maximum mean line length in file, otherwise file is filtered.'} )
lowerCamelCase__ : Optional[float] = field(
default=0.2_5 ,metadata={'help': 'Maximum fraction of non-alphanumeric characters, otherwise file is filtered.'} )
lowerCamelCase__ : Optional[float] = field(
default=1.5 ,metadata={'help': 'Minimum character token ratio for the file, otherwise file is filtered.'} )
lowerCamelCase__ : Optional[float] = field(
default=0.7 ,metadata={'help': 'Probability for filtering config, test and uncommon files.'} )
lowerCamelCase__ : Optional[str] = field(
default='codeparrot/codeparrot' ,metadata={'help': 'Name or path to the tokenizer.'} ,)
lowerCamelCase__ : Optional[bool] = field(
default=A__ ,metadata={'help': 'If True, near-duplicate samples are removed.'} )
lowerCamelCase__ : Optional[float] = field(
default=0.8_5 ,metadata={'help': 'Jaccard threshold for near-duplicate samples.'} )
@dataclass
class TokenizerTrainingArguments:
lowerCamelCase__ : Optional[str] = field(
default='gpt2' ,metadata={'help': 'Base tokenizer to build new tokenizer from.'} )
lowerCamelCase__ : Optional[str] = field(
default='transformersbook/codeparrot-train' ,metadata={'help': 'Dataset to train tokenizer on.'} )
lowerCamelCase__ : Optional[str] = field(default='content' ,metadata={'help': 'Column containing text data to process.'} )
lowerCamelCase__ : Optional[int] = field(default=2_0_0_0_0_0 ,metadata={'help': 'Number of examples to train tokenizer on.'} )
    lowerCamelCase__ : Optional[int] = field(
        default=3_2_7_6_8 ,metadata={'help': 'Vocabulary size of the new tokenizer.'} )
lowerCamelCase__ : Optional[str] = field(default='codeparrot' ,metadata={'help': 'Name of new tokenizer.'} )
lowerCamelCase__ : Optional[bool] = field(default=A__ ,metadata={'help': 'Push saved tokenizer to the hub.'} )
@dataclass
class PretokenizationArguments:
lowerCamelCase__ : Optional[str] = field(
default='codeparrot/codeparrot' ,metadata={'help': 'Name or path to the tokenizer.'} )
lowerCamelCase__ : Optional[str] = field(
default='codeparrot/codeparrot-clean-train' ,metadata={'help': 'Name or path to the dataset to pretokenize.'} )
lowerCamelCase__ : Optional[str] = field(
default='tokenized-codeparrot-train' ,metadata={'help': 'Repo name of the pretokenized data.'} )
lowerCamelCase__ : Optional[int] = field(default=A__ ,metadata={'help': 'Number of workers used for code evaluation.'} )
@dataclass
class InitializationArguments:
lowerCamelCase__ : Optional[str] = field(
default='gpt2-large' ,metadata={'help': 'Configuration to use for model initialization.'} )
lowerCamelCase__ : Optional[str] = field(
default='codeparrot/codeparrot' ,metadata={'help': 'Tokenizer attached to model.'} )
lowerCamelCase__ : Optional[str] = field(default='codeparrot' ,metadata={'help': 'Name of the created model.'} )
lowerCamelCase__ : Optional[bool] = field(default=A__ ,metadata={'help': 'Push saved tokenizer to the hub.'} )
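# These dataclasses are meant to be consumed with `HfArgumentParser` (a sketch;
# the field identifiers above were mangled in this dump, so only the class-level
# usage is shown):
#
#   from transformers import HfArgumentParser
#   parser = HfArgumentParser(TrainingArguments)
#   args = parser.parse_args_into_dataclasses()[0]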
| 165 | 0 |
from .imports import is_tqdm_available
if is_tqdm_available():
from tqdm.auto import tqdm as _tqdm
from ..state import PartialState
def tqdm(main_process_only: bool = True, *args, **kwargs):
    """Wrapper around `tqdm.auto.tqdm` that is disabled on non-main processes by default."""
    if not is_tqdm_available():
        raise ImportError("Accelerate's `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`.")
    disable = False
    if main_process_only:
        disable = PartialState().local_process_index != 0
    return _tqdm(*args, **kwargs, disable=disable)
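# Usage sketch: under a multi-process `accelerate launch`, only local rank 0
# renders a bar by default (the dataloader below is illustrative). Note that the
# first positional argument is `main_process_only`, so the iterable comes second:
#
#   from accelerate.utils import tqdm
#   for batch in tqdm(True, dataloader):
#       ...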
| 124 |
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from packaging import version
from .. import __version__
from .constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD
from .doc import (
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
copy_func,
replace_return_docstrings,
)
from .generic import (
ContextManagers,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
TensorType,
add_model_info_to_auto_map,
cached_property,
can_return_loss,
expand_dims,
find_labels,
flatten_dict,
infer_framework,
is_jax_tensor,
is_numpy_array,
is_tensor,
is_tf_symbolic_tensor,
is_tf_tensor,
is_torch_device,
is_torch_dtype,
is_torch_tensor,
reshape,
squeeze,
strtobool,
tensor_size,
to_numpy,
to_py_obj,
transpose,
working_or_temp_dir,
)
from .hub import (
CLOUDFRONT_DISTRIB_PREFIX,
DISABLE_TELEMETRY,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
EntryNotFoundError,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
cached_file,
default_cache_path,
define_sagemaker_information,
download_url,
extract_commit_hash,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
has_file,
http_user_agent,
is_offline_mode,
is_remote_url,
move_cache,
send_example_telemetry,
try_to_load_from_cache,
)
from .import_utils import (
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
TORCH_FX_REQUIRED_VERSION,
USE_JAX,
USE_TF,
USE_TORCH,
DummyObject,
OptionalDependencyNotAvailable,
_LazyModule,
ccl_version,
direct_transformers_import,
get_torch_version,
is_accelerate_available,
is_apex_available,
is_bitsandbytes_available,
    is_bs4_available,
is_coloredlogs_available,
is_cython_available,
is_datasets_available,
is_decord_available,
    is_detectron2_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_jieba_available,
is_jumanpp_available,
is_kenlm_available,
is_keras_nlp_available,
is_librosa_available,
is_natten_available,
is_ninja_available,
is_onnx_available,
is_openai_available,
is_optimum_available,
is_pandas_available,
is_peft_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
    is_py3nvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytest_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sacremoses_available,
is_safetensors_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_sudachi_available,
is_tensorflow_probability_available,
is_tensorflow_text_available,
    is_tf2onnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
    is_torch_bf16_available,
    is_torch_bf16_cpu_available,
    is_torch_bf16_gpu_available,
is_torch_compile_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_neuroncore_available,
is_torch_tensorrt_fx_available,
    is_torch_tf32_available,
is_torch_tpu_available,
is_torchaudio_available,
is_torchdistx_available,
is_torchdynamo_available,
is_torchvision_available,
is_training_run_on_sagemaker,
is_vision_available,
requires_backends,
torch_only_method,
)
SCREAMING_SNAKE_CASE :Tuple = 'pytorch_model.bin'
SCREAMING_SNAKE_CASE :str = 'pytorch_model.bin.index.json'
SCREAMING_SNAKE_CASE :int = 'adapter_config.json'
SCREAMING_SNAKE_CASE :List[str] = 'adapter_model.bin'
SCREAMING_SNAKE_CASE :Any = 'adapter_model.safetensors'
SCREAMING_SNAKE_CASE :int = 'tf_model.h5'
SCREAMING_SNAKE_CASE :Tuple = 'tf_model.h5.index.json'
SCREAMING_SNAKE_CASE :List[Any] = 'model.ckpt'
SCREAMING_SNAKE_CASE :Optional[int] = 'flax_model.msgpack'
SCREAMING_SNAKE_CASE :List[Any] = 'flax_model.msgpack.index.json'
SCREAMING_SNAKE_CASE :List[Any] = 'model.safetensors'
SCREAMING_SNAKE_CASE :Any = 'model.safetensors.index.json'
SCREAMING_SNAKE_CASE :int = 'config.json'
SCREAMING_SNAKE_CASE :List[str] = 'preprocessor_config.json'
SCREAMING_SNAKE_CASE :Optional[int] = FEATURE_EXTRACTOR_NAME
SCREAMING_SNAKE_CASE :Optional[Any] = 'generation_config.json'
SCREAMING_SNAKE_CASE :Dict = 'modelcard.json'
SCREAMING_SNAKE_CASE :Optional[Any] = '▁'
SCREAMING_SNAKE_CASE :Any = SENTENCEPIECE_UNDERLINE # Kept for backward compatibility
SCREAMING_SNAKE_CASE :Tuple = [
[[0, 1, 0, 1], [1, 0, 0, 1]]
] * 2 # Needs to have 0s and 1s only since XLM uses it for langs too.
SCREAMING_SNAKE_CASE :Union[str, Any] = [[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]]
SCREAMING_SNAKE_CASE :Tuple = [[1, 1, 1, 1, 1], [1, 1, 1, 0, 0], [0, 0, 0, 1, 1]]
def check_min_version(min_version):
    """Raise if the installed `transformers` version is older than `min_version`."""
    if version.parse(__version__) < version.parse(min_version):
        if "dev" in min_version:
            error_message = (
                "This example requires a source install from HuggingFace Transformers (see "
                "`https://huggingface.co/docs/transformers/installation#install-from-source`),"
            )
        else:
            error_message = f"This example requires a minimum version of {min_version},"
        error_message += f" but the version found is {__version__}.\n"
        raise ImportError(
            error_message
            + "Check out https://github.com/huggingface/transformers/tree/main/examples#important-note for the examples corresponding to other "
            "versions of HuggingFace Transformers."
        )
| 124 | 1 |
"""simple docstring"""
import os
import sys
SRC_DIR = os.path.join(os.path.dirname(__file__), "src")
sys.path.append(SRC_DIR)
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoTokenizer,
add_start_docstrings,
)
dependencies = [
"torch",
"numpy",
"tokenizers",
"filelock",
"requests",
"tqdm",
"regex",
"sentencepiece",
"sacremoses",
"importlib_metadata",
"huggingface_hub",
]
@add_start_docstrings(AutoConfig.__doc__)
def config(*args, **kwargs):
    return AutoConfig.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoTokenizer.__doc__)
def tokenizer(*args, **kwargs):
    return AutoTokenizer.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModel.__doc__)
def model(*args, **kwargs):
    return AutoModel.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForCausalLM.__doc__)
def modelForCausalLM(*args, **kwargs):
    return AutoModelForCausalLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForMaskedLM.__doc__)
def modelForMaskedLM(*args, **kwargs):
    return AutoModelForMaskedLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForSequenceClassification.__doc__)
def modelForSequenceClassification(*args, **kwargs):
    return AutoModelForSequenceClassification.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForQuestionAnswering.__doc__)
def modelForQuestionAnswering(*args, **kwargs):
    return AutoModelForQuestionAnswering.from_pretrained(*args, **kwargs)
| 132 |
"""simple docstring"""
import importlib
import inspect
import json
import os
import re
import shutil
import sys
from pathlib import Path
from typing import Dict, Optional, Union
from urllib import request
from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info
from packaging import version
from .. import __version__
from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging
COMMUNITY_PIPELINES_URL = (
    "https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py"
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
def get_diffusers_versions():
    url = "https://pypi.org/pypi/diffusers/json"
    releases = json.loads(request.urlopen(url).read())["releases"].keys()
    return sorted(releases, key=lambda x: version.Version(x))


def init_hf_modules():
    # This function has already been executed if HF_MODULES_CACHE already is in the Python path.
    if HF_MODULES_CACHE in sys.path:
        return
    sys.path.append(HF_MODULES_CACHE)
    os.makedirs(HF_MODULES_CACHE, exist_ok=True)
    init_path = Path(HF_MODULES_CACHE) / "__init__.py"
    if not init_path.exists():
        init_path.touch()
def create_dynamic_module(name):
    init_hf_modules()
    dynamic_module_path = Path(HF_MODULES_CACHE) / name
    # If the parent module does not exist yet, recursively create it.
    if not dynamic_module_path.parent.exists():
        create_dynamic_module(dynamic_module_path.parent)
    os.makedirs(dynamic_module_path, exist_ok=True)
    init_path = dynamic_module_path / "__init__.py"
    if not init_path.exists():
        init_path.touch()
def get_relative_imports(module_file):
    with open(module_file, "r", encoding="utf-8") as f:
        content = f.read()
    # Imports of the form `import .xxx`
    relative_imports = re.findall(r"^\s*import\s+\.(\S+)\s*$", content, flags=re.MULTILINE)
    # Imports of the form `from .xxx import yyy`
    relative_imports += re.findall(r"^\s*from\s+\.(\S+)\s+import", content, flags=re.MULTILINE)
    # Unique-ify
    return list(set(relative_imports))
def get_relative_import_files(module_file):
    no_change = False
    files_to_check = [module_file]
    all_relative_imports = []
    # Let's recurse through all relative imports
    while not no_change:
        new_imports = []
        for f in files_to_check:
            new_imports.extend(get_relative_imports(f))
        module_path = Path(module_file).parent
        new_import_files = [str(module_path / m) for m in new_imports]
        new_import_files = [f for f in new_import_files if f not in all_relative_imports]
        files_to_check = [f"{f}.py" for f in new_import_files]
        no_change = len(new_import_files) == 0
        all_relative_imports.extend(files_to_check)
    return all_relative_imports
def check_imports(filename):
    with open(filename, "r", encoding="utf-8") as f:
        content = f.read()
    # Imports of the form `import xxx`
    imports = re.findall(r"^\s*import\s+(\S+)\s*$", content, flags=re.MULTILINE)
    # Imports of the form `from xxx import yyy`
    imports += re.findall(r"^\s*from\s+(\S+)\s+import", content, flags=re.MULTILINE)
    # Only keep the top-level module
    imports = [imp.split(".")[0] for imp in imports if not imp.startswith(".")]
    # Unique-ify and test we got them all
    imports = list(set(imports))
    missing_packages = []
    for imp in imports:
        try:
            importlib.import_module(imp)
        except ImportError:
            missing_packages.append(imp)
    if len(missing_packages) > 0:
        raise ImportError(
            "This modeling file requires the following packages that were not found in your environment: "
            f"{', '.join(missing_packages)}. Run `pip install {' '.join(missing_packages)}`"
        )
    return get_relative_imports(filename)
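# Worked example (hypothetical file contents): for a module containing
# `import torch` and `from .my_utils import helper`, check_imports() verifies
# that `torch` is importable, raises ImportError listing anything missing, and
# returns ["my_utils"] so the relative dependency can be fetched as well.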
def get_class_in_module(class_name, module_path):
    module_path = module_path.replace(os.path.sep, ".")
    module = importlib.import_module(module_path)
    if class_name is None:
        return find_pipeline_class(module)
    return getattr(module, class_name)
def find_pipeline_class(loaded_module):
    from ..pipelines import DiffusionPipeline

    cls_members = dict(inspect.getmembers(loaded_module, inspect.isclass))
    pipeline_class = None
    for cls_name, cls in cls_members.items():
        if (
            cls_name != DiffusionPipeline.__name__
            and issubclass(cls, DiffusionPipeline)
            and cls.__module__.split(".")[0] != "diffusers"
        ):
            if pipeline_class is not None:
                raise ValueError(
                    f"Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:"
                    f" {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in"
                    f" {loaded_module}."
                )
            pipeline_class = cls
    return pipeline_class
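# Usage sketch: given a freshly imported community pipeline module,
# find_pipeline_class() returns its single DiffusionPipeline subclass
# (the module name below is illustrative):
#
#   module = importlib.import_module("diffusers_modules.git.my_pipeline")
#   pipeline_class = find_pipeline_class(module)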
def get_cached_module_file(
    pretrained_model_name_or_path,
    module_file,
    cache_dir=None,
    force_download=False,
    resume_download=False,
    proxies=None,
    use_auth_token=None,
    revision=None,
    local_files_only=False,
):
    pretrained_model_name_or_path = str(pretrained_model_name_or_path)
    module_file_or_url = os.path.join(pretrained_model_name_or_path, module_file)
    if os.path.isfile(module_file_or_url):
        resolved_module_file = module_file_or_url
        submodule = "local"
    elif pretrained_model_name_or_path.count("/") == 0:
        available_versions = get_diffusers_versions()
        # cut ".dev0"
        latest_version = "v" + ".".join(__version__.split(".")[:3])
        # retrieve github version that matches
        if revision is None:
            revision = latest_version if latest_version[1:] in available_versions else "main"
            logger.info(f"Defaulting to latest_version: {revision}.")
        elif revision in available_versions:
            revision = f"v{revision}"
        elif revision == "main":
            pass
        else:
            raise ValueError(
                f"`custom_revision`: {revision} does not exist. Please make sure to choose one of"
                f" {', '.join(available_versions + ['main'])}."
            )
        # community pipeline on GitHub
        github_url = COMMUNITY_PIPELINES_URL.format(revision=revision, pipeline=pretrained_model_name_or_path)
        try:
            resolved_module_file = cached_download(
                github_url,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
                use_auth_token=False,
            )
            submodule = "git"
            module_file = pretrained_model_name_or_path + ".py"
        except EnvironmentError:
            logger.error(f"Could not locate the {module_file} inside {pretrained_model_name_or_path}.")
            raise
    else:
        try:
            # Load from URL or cache if already cached
            resolved_module_file = hf_hub_download(
                pretrained_model_name_or_path,
                module_file,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
                use_auth_token=use_auth_token,
            )
            submodule = os.path.join("local", "--".join(pretrained_model_name_or_path.split("/")))
        except EnvironmentError:
            logger.error(f"Could not locate the {module_file} inside {pretrained_model_name_or_path}.")
            raise
    # Check we have all the requirements in our environment
    modules_needed = check_imports(resolved_module_file)
    # Now we move the module inside our cached dynamic modules.
    full_submodule = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule
    create_dynamic_module(full_submodule)
    submodule_path = Path(HF_MODULES_CACHE) / full_submodule
    if submodule == "local" or submodule == "git":
        # We always copy local files (we could hash the file to see if there was a change, and give them the name of
        # that hash, to only copy when there is a modification but it seems overkill for now).
        # The only reason we do the copy is to avoid putting too many folders in sys.path.
        shutil.copy(resolved_module_file, submodule_path / module_file)
        for module_needed in modules_needed:
            module_needed = f"{module_needed}.py"
            shutil.copy(os.path.join(pretrained_model_name_or_path, module_needed), submodule_path / module_needed)
    else:
        # Get the commit hash
        # TODO: we will get this info in the etag soon, so retrieve it from there and not here.
        if isinstance(use_auth_token, str):
            token = use_auth_token
        elif use_auth_token is True:
            token = HfFolder.get_token()
        else:
            token = None
        commit_hash = model_info(pretrained_model_name_or_path, revision=revision, token=token).sha
        # The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the
        # benefit of versioning.
        submodule_path = submodule_path / commit_hash
        full_submodule = full_submodule + os.path.sep + commit_hash
        create_dynamic_module(full_submodule)
        if not (submodule_path / module_file).exists():
            shutil.copy(resolved_module_file, submodule_path / module_file)
        # Make sure we also have every file with relative
        for module_needed in modules_needed:
            if not (submodule_path / module_needed).exists():
                get_cached_module_file(
                    pretrained_model_name_or_path,
                    f"{module_needed}.py",
                    cache_dir=cache_dir,
                    force_download=force_download,
                    resume_download=resume_download,
                    proxies=proxies,
                    use_auth_token=use_auth_token,
                    revision=revision,
                    local_files_only=local_files_only,
                )
    return os.path.join(full_submodule, module_file)
def get_class_from_dynamic_module(
    pretrained_model_name_or_path,
    module_file,
    class_name=None,
    cache_dir=None,
    force_download=False,
    resume_download=False,
    proxies=None,
    use_auth_token=None,
    revision=None,
    local_files_only=False,
    **kwargs,
):
    final_module = get_cached_module_file(
        pretrained_model_name_or_path,
        module_file,
        cache_dir=cache_dir,
        force_download=force_download,
        resume_download=resume_download,
        proxies=proxies,
        use_auth_token=use_auth_token,
        revision=revision,
        local_files_only=local_files_only,
    )
    return get_class_in_module(class_name, final_module.replace(".py", ""))
| 132 | 1 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SwiftFormerConfig,
SwiftFormerForImageClassification,
ViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
device = torch.device('''cpu''')
def prepare_img():
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url, stream=True).raw)
    return im
def get_expected_output(swiftformer_name):
    if swiftformer_name == "swiftformer_xs":
        return torch.tensor([-2.1703e00, 2.1107e00, -2.0811e00, 8.8685e-01, 2.4360e-01])
    elif swiftformer_name == "swiftformer_s":
        return torch.tensor([3.9636e-01, 2.3478e-01, -1.6963e00, -1.7381e00, -8.6337e-01])
    elif swiftformer_name == "swiftformer_l1":
        return torch.tensor([-4.2768e-01, -4.7429e-01, -1.0897e00, -1.0248e00, 3.5523e-02])
    elif swiftformer_name == "swiftformer_l3":
        return torch.tensor([-2.5330e-01, 2.4211e-01, -6.0185e-01, -8.2789e-01, -6.0446e-02])
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def create_rename_keys(state_dict):
    rename_keys = []
    for k in state_dict.keys():
        k_new = k
        if ".pwconv" in k:
            k_new = k_new.replace('.pwconv', '.point_wise_conv')
        if ".dwconv" in k:
            k_new = k_new.replace('.dwconv', '.depth_wise_conv')
        if ".Proj." in k:
            k_new = k_new.replace('.Proj.', '.proj.')
        if "patch_embed" in k_new:
            k_new = k_new.replace('patch_embed', 'swiftformer.patch_embed.patch_embedding')
        if "network" in k_new:
            ls = k_new.split('.')
            if ls[2].isdigit():
                k_new = 'swiftformer.encoder.network.' + ls[1] + '.blocks.' + ls[2] + '.' + '.'.join(ls[3:])
            else:
                k_new = k_new.replace('network', 'swiftformer.encoder.network')
        rename_keys.append((k, k_new))
    return rename_keys
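# Worked example of the renaming rules above (keys are illustrative):
#
#   "patch_embed.0.weight"
#       -> "swiftformer.patch_embed.patch_embedding.0.weight"
#   "network.0.1.pwconv1.weight"
#       -> "swiftformer.encoder.network.0.blocks.1.point_wise_conv1.weight"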
@torch.no_grad()
def __lowercase ( __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : str , __lowerCAmelCase : str ):
a__ = SwiftFormerConfig()
# dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
a__ = 1_0_0_0
a__ = 'huggingface/label-files'
a__ = 'imagenet-1k-id2label.json'
a__ = json.load(open(hf_hub_download(__lowerCAmelCase , __lowerCAmelCase , repo_type='dataset' ) , 'r' ) )
a__ = {int(__lowerCAmelCase ): v for k, v in idalabel.items()}
a__ = idalabel
a__ = {v: k for k, v in idalabel.items()}
# size of the architecture
if swiftformer_name == "swiftformer_xs":
a__ = [3, 3, 6, 4]
a__ = [4_8, 5_6, 1_1_2, 2_2_0]
elif swiftformer_name == "swiftformer_s":
a__ = [3, 3, 9, 6]
a__ = [4_8, 6_4, 1_6_8, 2_2_4]
elif swiftformer_name == "swiftformer_l1":
a__ = [4, 3, 1_0, 5]
a__ = [4_8, 9_6, 1_9_2, 3_8_4]
elif swiftformer_name == "swiftformer_l3":
a__ = [4, 4, 1_2, 6]
a__ = [6_4, 1_2_8, 3_2_0, 5_1_2]
# load state_dict of original model, remove and rename some keys
if original_ckpt:
if original_ckpt.startswith('https' ):
a__ = torch.hub.load_state_dict_from_url(__lowerCAmelCase , map_location='cpu' , check_hash=__lowerCAmelCase )
else:
a__ = torch.load(__lowerCAmelCase , map_location='cpu' )
a__ = checkpoint
a__ = create_rename_keys(__lowerCAmelCase )
for rename_key_src, rename_key_dest in rename_keys:
rename_key(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
# load HuggingFace model
a__ = SwiftFormerForImageClassification(__lowerCAmelCase ).eval()
hf_model.load_state_dict(__lowerCAmelCase )
# prepare test inputs
a__ = prepare_img()
a__ = ViTImageProcessor.from_pretrained('preprocessor_config' )
a__ = processor(images=__lowerCAmelCase , return_tensors='pt' )
# compare outputs from both models
a__ = get_expected_output(__lowerCAmelCase )
a__ = hf_model(inputs['pixel_values'] ).logits
assert hf_logits.shape == torch.Size([1, 1_0_0_0] )
assert torch.allclose(hf_logits[0, 0:5] , __lowerCAmelCase , atol=1E-3 )
Path(__lowerCAmelCase ).mkdir(exist_ok=__lowerCAmelCase )
print(F'Saving model {swiftformer_name} to {pytorch_dump_folder_path}' )
hf_model.save_pretrained(__lowerCAmelCase )
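# Example invocation (a sketch: the script file name and checkpoint path are
# placeholders for a file released with the original SwiftFormer repo):
#
#   python convert_swiftformer_original_to_hf.py \
#       --swiftformer_name swiftformer_xs \
#       --pytorch_dump_folder_path ./converted_outputs/ \
#       --original_ckpt ./swiftformer_xs.pth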
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--swiftformer_name''',
default='''swiftformer_xs''',
choices=['''swiftformer_xs''', '''swiftformer_s''', '''swiftformer_l1''', '''swiftformer_l3'''],
type=str,
help='''Name of the SwiftFormer model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''./converted_outputs/''',
type=str,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument('''--original_ckpt''', default=None, type=str, help='''Path to the original model checkpoint.''')
    args = parser.parse_args()
convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
| 109 |
from collections import defaultdict
from math import ceil, sqrt
def solution(t_limit: int = 1_0_0_0_0_0_0, n_limit: int = 1_0) -> int:
    count = defaultdict(int)
    for outer_width in range(3, (t_limit // 4) + 2):
        if outer_width * outer_width > t_limit:
            hole_width_lower_bound = max(
                ceil(sqrt(outer_width * outer_width - t_limit)), 1
            )
        else:
            hole_width_lower_bound = 1
        hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2
        for hole_width in range(hole_width_lower_bound, outer_width - 1, 2):
            count[outer_width * outer_width - hole_width * hole_width] += 1
    return sum(1 for n in count.values() if 1 <= n <= n_limit)
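# The identity behind the counting loop: a square lamina with outer side a and
# hole side b (same parity, 1 <= b <= a - 2) uses t = a*a - b*b tiles, e.g.
# a=3, b=1 -> 8 tiles and a=6, b=4 -> 20 tiles. `solution()` counts how many
# tile totals t <= t_limit arise from between 1 and n_limit distinct laminae.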
if __name__ == "__main__":
print(f"""{solution() = }""")
| 109 | 1 |
import numpy as np
SQUARE = [
    ["a", "b", "c", "d", "e"],
    ["f", "g", "h", "i", "k"],
    ["l", "m", "n", "o", "p"],
    ["q", "r", "s", "t", "u"],
    ["v", "w", "x", "y", "z"],
]


class BifidCipher:
    def __init__(self) -> None:
        self.SQUARE = np.array(SQUARE)

    def letter_to_numbers(self, letter: str) -> np.ndarray:
        """Return the 1-based (row, column) coordinates of `letter` in the square."""
        index1, index2 = np.where(letter == self.SQUARE)
        indexes = np.concatenate([index1 + 1, index2 + 1])
        return indexes

    def numbers_to_letter(self, index1: int, index2: int) -> str:
        """Return the letter at the 1-based (row, column) coordinates."""
        letter = self.SQUARE[index1 - 1, index2 - 1]
        return letter

    def encode(self, message: str) -> str:
        message = message.lower()
        message = message.replace(" ", "")
        message = message.replace("j", "i")
        first_step = np.empty((2, len(message)))
        for letter_index in range(len(message)):
            numbers = self.letter_to_numbers(message[letter_index])
            first_step[0, letter_index] = numbers[0]
            first_step[1, letter_index] = numbers[1]
        second_step = first_step.reshape(2 * len(message))
        encoded_message = ""
        for numbers_index in range(len(message)):
            index1 = int(second_step[numbers_index * 2])
            index2 = int(second_step[(numbers_index * 2) + 1])
            letter = self.numbers_to_letter(index1, index2)
            encoded_message = encoded_message + letter
        return encoded_message

    def decode(self, message: str) -> str:
        message = message.lower()
        message = message.replace(" ", "")
        first_step = np.empty(2 * len(message))
        for letter_index in range(len(message)):
            numbers = self.letter_to_numbers(message[letter_index])
            first_step[letter_index * 2] = numbers[0]
            first_step[letter_index * 2 + 1] = numbers[1]
        second_step = first_step.reshape((2, len(message)))
        decoded_message = ""
        for numbers_index in range(len(message)):
            index1 = int(second_step[0, numbers_index])
            index2 = int(second_step[1, numbers_index])
            letter = self.numbers_to_letter(index1, index2)
            decoded_message = decoded_message + letter
        return decoded_message
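# Round-trip usage sketch ("j" is folded into "i" and spaces are dropped, so
# only j-free lowercase messages round-trip exactly):
#
#   cipher = BifidCipher()
#   secret = cipher.encode("testmessage")
#   assert cipher.decode(secret) == "testmessage"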
| 85 |
from collections import UserDict
from typing import Union
import numpy as np
import requests
from ..utils import (
add_end_docstrings,
logging,
)
from .audio_classification import ffmpeg_read
from .base import PIPELINE_INIT_ARGS, Pipeline
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotAudioClassificationPipeline(Pipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        if self.framework != "pt":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")
        # No specific FOR_XXX available yet

    def __call__(self, audios, **kwargs):
        return super().__call__(audios, **kwargs)
    def _sanitize_parameters(self, **kwargs):
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]
        return preprocess_params, {}, {}
    def preprocess(self, audio, candidate_labels=None, hypothesis_template="This is a sound of {}."):
        if isinstance(audio, str):
            if audio.startswith("http://") or audio.startswith("https://"):
                # We need to actually check for a real protocol, otherwise it's impossible to use a local file
                # like http_huggingface_co.png
                audio = requests.get(audio).content
            else:
                with open(audio, "rb") as f:
                    audio = f.read()
        if isinstance(audio, bytes):
            audio = ffmpeg_read(audio, self.feature_extractor.sampling_rate)
        if not isinstance(audio, np.ndarray):
            raise ValueError("We expect a numpy ndarray as input")
        if len(audio.shape) != 1:
            raise ValueError("We expect a single channel audio input for ZeroShotAudioClassificationPipeline")
        inputs = self.feature_extractor(
            [audio], sampling_rate=self.feature_extractor.sampling_rate, return_tensors="pt"
        )
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["text_inputs"] = [text_inputs]
        return inputs
    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop("candidate_labels")
        text_inputs = model_inputs.pop("text_inputs")
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]
        outputs = self.model(**text_inputs, **model_inputs)
        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_audio,
        }
        return model_outputs
    def postprocess(self, model_outputs):
        candidate_labels = model_outputs.pop("candidate_labels")
        logits = model_outputs["logits"][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=0)
            scores = probs.tolist()
        else:
            raise ValueError("`tf` framework not supported.")
        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
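# Usage sketch (the CLAP checkpoint below is one known compatible model; the
# audio file name is a placeholder):
#
#   from transformers import pipeline
#   classifier = pipeline(
#       task="zero-shot-audio-classification", model="laion/clap-htsat-unfused"
#   )
#   classifier("dog.wav", candidate_labels=["Sound of a dog", "Sound of vacuum cleaner"])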
| 20 | 0 |
"""simple docstring"""
import argparse
import torch
from transformers import FunnelBaseModel, FunnelConfig, FunnelModel, load_tf_weights_in_funnel
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path, base_model):
    # Initialise the PyTorch model from its JSON configuration
    config = FunnelConfig.from_json_file(config_file)
    print(f'''Building PyTorch model from configuration: {config}''')
    model = FunnelBaseModel(config) if base_model else FunnelModel(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_funnel(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f'''Save PyTorch model to {pytorch_dump_path}''')
    torch.save(model.state_dict(), pytorch_dump_path)
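# Example invocation (paths are placeholders):
#
#   python convert_funnel_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./funnel/model.ckpt \
#       --config_file ./funnel/config.json \
#       --pytorch_dump_path ./funnel/pytorch_model.bin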
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help="""The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.""",
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--base_model""", action="""store_true""", help="""Whether you want just the base model (no decoder) or not."""
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path, args.base_model
) | 365 |
"""simple docstring"""
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFAutoModel, is_tensorflow_text_available, is_tf_available
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_tensorflow_text_available():
from transformers.models.bert import TFBertTokenizer
lowerCamelCase_ : str = ["""bert-base-uncased""", """bert-base-cased"""]
lowerCamelCase_ : List[str] = """hf-internal-testing/tiny-bert-tf-only"""
if is_tf_available():
    class ModelToSave(tf.keras.Model):
        def __init__(self, tokenizer):
            super().__init__()
            self.tokenizer = tokenizer
            config = AutoConfig.from_pretrained(TINY_MODEL_CHECKPOINT)
            self.bert = TFAutoModel.from_config(config)

        def call(self, inputs):
            tokenized = self.tokenizer(inputs)
            out = self.bert(**tokenized)
            return out["pooler_output"]
@require_tf
@require_tensorflow_text
class BertTokenizationTest(unittest.TestCase):
    def setUp(self):
        super().setUp()
        self.tokenizers = [
            BertTokenizer.from_pretrained(checkpoint) for checkpoint in (TOKENIZER_CHECKPOINTS * 2)
        ]  # repeat for when fast_bert_tokenizer=False
        self.tf_tokenizers = [TFBertTokenizer.from_pretrained(checkpoint) for checkpoint in TOKENIZER_CHECKPOINTS] + [
            TFBertTokenizer.from_pretrained(checkpoint, use_fast_bert_tokenizer=False)
            for checkpoint in TOKENIZER_CHECKPOINTS
        ]
        assert len(self.tokenizers) == len(self.tf_tokenizers)

        self.test_sentences = [
            "This is a straightforward English test sentence.",
            "This one has some weird characters\rto\nsee\r\nif those\u00E9break things.",
            "Now we're going to add some Chinese: 一 二 三 一二三",
            "And some much more rare Chinese: 齉 堃 齉堃",
            "Je vais aussi écrire en français pour tester les accents",
            "Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ",
        ]
        self.paired_sentences = list(zip(self.test_sentences, self.test_sentences[::-1]))
    def test_output_equivalence(self):
        for tokenizer, tf_tokenizer in zip(self.tokenizers, self.tf_tokenizers):
            for test_inputs in (self.test_sentences, self.paired_sentences):
                python_outputs = tokenizer(test_inputs, return_tensors="tf", padding="longest")
                tf_outputs = tf_tokenizer(test_inputs)

                for key in python_outputs.keys():
                    self.assertTrue(tf.reduce_all(python_outputs[key].shape == tf_outputs[key].shape))
                    self.assertTrue(tf.reduce_all(tf.cast(python_outputs[key], tf.int64) == tf_outputs[key]))
    @slow
    def test_different_pairing_styles(self):
        for tf_tokenizer in self.tf_tokenizers:
            merged_outputs = tf_tokenizer(self.paired_sentences)
            separated_outputs = tf_tokenizer(
                text=[sentence[0] for sentence in self.paired_sentences],
                text_pair=[sentence[1] for sentence in self.paired_sentences],
            )
            for key in merged_outputs.keys():
                self.assertTrue(tf.reduce_all(tf.cast(merged_outputs[key], tf.int64) == separated_outputs[key]))
    @slow
    def test_graph_mode(self):
        for tf_tokenizer in self.tf_tokenizers:
            compiled_tokenizer = tf.function(tf_tokenizer)
            for test_inputs in (self.test_sentences, self.paired_sentences):
                test_inputs = tf.constant(test_inputs)
                compiled_outputs = compiled_tokenizer(test_inputs)
                eager_outputs = tf_tokenizer(test_inputs)

                for key in eager_outputs.keys():
                    self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key]))
    @slow
    def test_saved_model_export(self):
        for tf_tokenizer in self.tf_tokenizers:
            model = ModelToSave(tokenizer=tf_tokenizer)
            test_inputs = tf.convert_to_tensor(self.test_sentences)
            out = model(test_inputs)  # Build model with some sample inputs
            with TemporaryDirectory() as tempdir:
                save_path = Path(tempdir) / "saved.model"
                model.save(save_path)
                loaded_model = tf.keras.models.load_model(save_path)
                loaded_output = loaded_model(test_inputs)
                # We may see small differences because the loaded model is compiled, so we need an epsilon for the test
                self.assertLessEqual(tf.reduce_max(tf.abs(out - loaded_output)), 1e-5)
| 215 | 0 |
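# Why in-graph tokenization matters: the tokenizer is part of the SavedModel, so a
# served model needs no Python-side preprocessing. A small sketch using the same
# tiny test checkpoint as above (everything else is assumption-level illustration):
import tensorflow as tf
from transformers import TFAutoModel, TFBertTokenizer

tokenizer = TFBertTokenizer.from_pretrained("hf-internal-testing/tiny-bert-tf-only")
bert = TFAutoModel.from_pretrained("hf-internal-testing/tiny-bert-tf-only")

inputs = tf.constant(["Raw strings go straight into the graph."])
pooled = bert(**tokenizer(inputs))["pooler_output"]
print(pooled.shape)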
"""simple docstring"""
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class BaseCompressedFileFileSystem(AbstractArchiveFileSystem):
    root_marker = ""
    protocol: str = (
        None  # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
    )
    compression: str = None  # compression type in fsspec. ex: "gzip"
    extension: str = None  # extension of the filename to strip. ex: ".gz" to get file.txt from file.txt.gz
    def __init__(
        self, fo: str = "", target_protocol: Optional[str] = None, target_options: Optional[dict] = None, **kwargs
    ):
        super().__init__(self, **kwargs)
        # always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
        self.file = fsspec.open(
            fo,
            mode="rb",
            protocol=target_protocol,
            compression=self.compression,
            client_kwargs={
                "requote_redirect_url": False,  # see https://github.com/huggingface/datasets/pull/5459
                "trust_env": True,  # Enable reading proxy env variables.
                **(target_options or {}).pop("client_kwargs", {}),  # To avoid issues if it was already passed.
            },
            **(target_options or {}),
        )
        self.compressed_name = os.path.basename(self.file.path.split("::")[0])
        self.uncompressed_name = (
            self.compressed_name[: self.compressed_name.rindex(".")]
            if "." in self.compressed_name
            else self.compressed_name
        )
        self.dir_cache = None
    @classmethod
    def _strip_protocol(cls, path):
        # compressed file paths are always relative to the archive root
        return super()._strip_protocol(path).lstrip("/")

    def _get_dirs(self):
        if self.dir_cache is None:
            f = {**self.file.fs.info(self.file.path), "name": self.uncompressed_name}
            self.dir_cache = {f["name"]: f}

    def cat(self, path: str):
        return self.file.open().read()

    def _open(self, path: str, mode: str = "rb", block_size=None, autocommit=True, cache_options=None, **kwargs):
        path = self._strip_protocol(path)
        if mode != "rb":
            raise ValueError(f"Tried to read with mode {mode} on file {self.file.path} opened with mode 'rb'")
        return self.file.open()
class Bz2FileSystem(BaseCompressedFileFileSystem):
    protocol = "bz2"
    compression = "bz2"
    extension = ".bz2"


class GzipFileSystem(BaseCompressedFileFileSystem):
    protocol = "gzip"
    compression = "gzip"
    extension = ".gz"


class Lz4FileSystem(BaseCompressedFileFileSystem):
    protocol = "lz4"
    compression = "lz4"
    extension = ".lz4"


class XzFileSystem(BaseCompressedFileFileSystem):
    protocol = "xz"
    compression = "xz"
    extension = ".xz"


class ZstdFileSystem(BaseCompressedFileFileSystem):
    protocol = "zstd"
    compression = "zstd"
    extension = ".zst"
    def __init__(
        self,
        fo: str,
        mode: str = "rb",
        target_protocol: Optional[str] = None,
        target_options: Optional[dict] = None,
        block_size: int = DEFAULT_BLOCK_SIZE,
        **kwargs,
    ):
        super().__init__(
            fo=fo,
            mode=mode,
            target_protocol=target_protocol,
            target_options=target_options,
            block_size=block_size,
            **kwargs,
        )
        # We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
        #
        # File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
        #     out.close = close
        # AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
        #
        # see https://github.com/intake/filesystem_spec/issues/725
        _enter = self.file.__enter__

        class WrappedFile:
            def __init__(self, file_):
                self._file = file_

            def __enter__(self):
                self._file.__enter__()
                return self

            def __exit__(self, *args, **kwargs):
                self._file.__exit__(*args, **kwargs)

            def __iter__(self):
                return iter(self._file)

            def __next__(self):
                return next(self._file)

            def __getattr__(self, attr):
                return getattr(self._file, attr)

        def fixed_enter(*args, **kwargs):
            return WrappedFile(_enter(*args, **kwargs))

        self.file.__enter__ = fixed_enter
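# Usage sketch: assuming GzipFileSystem is registered with fsspec under the "gzip"
# protocol (the registration call lives elsewhere in the library), a compressed
# remote file can be read transparently via URL chaining. The URL is a placeholder.
import fsspec

with fsspec.open("gzip://file.txt::https://example.com/file.txt.gz", "rt") as f:
    print(f.readline())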
| 255 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    # See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}


class MegatronBertConfig(PretrainedConfig):
    model_type = "megatron-bert"

    def __init__(
        self,
        vocab_size=29056,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
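# Illustrative instantiation of the config above (the overrides are arbitrary,
# just to show that any constructor argument can be changed):
config = MegatronBertConfig(num_hidden_layers=2, hidden_size=128, num_attention_heads=4)
print(config.model_type, config.hidden_size)  # megatron-bert 128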
| 255 | 1 |
import os
import zipfile
import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links
def get_daily_ci_runs(token, num_runs=7):
    """Get the workflow runs of the scheduled (daily) CI."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    # The id of a workflow (not of a workflow run)
    workflow_id = "636036"

    url = f"https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs"
    # On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
    url += f"?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}"

    result = requests.get(url, headers=headers).json()

    return result["workflow_runs"]
def get_last_daily_ci_runs(token):
    """Get the id of the last completed run of the scheduled (daily) CI."""
    workflow_runs = get_daily_ci_runs(token)
    workflow_run_id = None
    for workflow_run in workflow_runs:
        if workflow_run["status"] == "completed":
            workflow_run_id = workflow_run["id"]
            break

    return workflow_run_id
def get_last_daily_ci_artifacts(artifact_names, output_dir, token):
    """Download the requested artifacts of the last completed daily CI run."""
    workflow_run_id = get_last_daily_ci_runs(token)
    if workflow_run_id is not None:
        # `worflow_run_id` (sic) is the keyword expected by `get_artifacts_links`
        artifacts_links = get_artifacts_links(worflow_run_id=workflow_run_id, token=token)
        for artifact_name in artifact_names:
            if artifact_name in artifacts_links:
                artifact_url = artifacts_links[artifact_name]
                download_artifact(
                    artifact_name=artifact_name, artifact_url=artifact_url, output_dir=output_dir, token=token
                )
def get_last_daily_ci_reports(artifact_names, output_dir, token):
    """Get the file contents of the artifacts from the last daily CI run."""
    get_last_daily_ci_artifacts(artifact_names, output_dir, token)

    results = {}
    for artifact_name in artifact_names:
        artifact_zip_path = os.path.join(output_dir, f"{artifact_name}.zip")
        if os.path.isfile(artifact_zip_path):
            results[artifact_name] = {}
            with zipfile.ZipFile(artifact_zip_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        with z.open(filename) as f:
                            results[artifact_name][filename] = f.read().decode("UTF-8")

    return results
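# Illustrative call (the artifact name and environment variable are placeholders):
reports = get_last_daily_ci_reports(
    artifact_names=["run_models_gpu_test_reports"],
    output_dir="ci_reports",
    token=os.environ.get("GITHUB_TOKEN"),
)
for name, files in reports.items():
    print(name, sorted(files)[:3])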
| 290 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_lxmert": ["LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "LxmertConfig"],
    "tokenization_lxmert": ["LxmertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = ["LxmertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_lxmert"] = [
        "LxmertEncoder",
        "LxmertForPreTraining",
        "LxmertForQuestionAnswering",
        "LxmertModel",
        "LxmertPreTrainedModel",
        "LxmertVisualFeatureEncoder",
        "LxmertXLayer",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_lxmert"] = [
        "TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFLxmertForPreTraining",
        "TFLxmertMainLayer",
        "TFLxmertModel",
        "TFLxmertPreTrainedModel",
        "TFLxmertVisualFeatureEncoder",
    ]
if TYPE_CHECKING:
from .configuration_lxmert import LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, LxmertConfig
from .tokenization_lxmert import LxmertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_lxmert_fast import LxmertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lxmert import (
LxmertEncoder,
LxmertForPreTraining,
LxmertForQuestionAnswering,
LxmertModel,
LxmertPreTrainedModel,
LxmertVisualFeatureEncoder,
LxmertXLayer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_lxmert import (
TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLxmertForPreTraining,
TFLxmertMainLayer,
TFLxmertModel,
TFLxmertPreTrainedModel,
TFLxmertVisualFeatureEncoder,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
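# The pattern above delays importing heavy submodules until an attribute is first
# accessed. A minimal standalone sketch of the same idea (all names illustrative;
# a real implementation would raise AttributeError for unknown names):
import importlib
import types


class MiniLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # {submodule: [exported names]} -> reverse index from exported name to submodule
        self._name_to_module = {n: m for m, names in import_structure.items() for n in names}

    def __getattr__(self, attr):
        submodule = importlib.import_module("." + self._name_to_module[attr], self.__name__)
        return getattr(submodule, attr)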
| 290 | 1 |
'''simple docstring'''
def climb_stairs(number_of_steps: int) -> int:
    """Distinct ways to climb `number_of_steps` steps taking 1 or 2 at a time (Fibonacci recurrence)."""
    assert (
        isinstance(number_of_steps, int) and number_of_steps > 0
    ), f"number_of_steps needs to be positive integer, your input {number_of_steps}"
    if number_of_steps == 1:
        return 1
    previous, current = 1, 1
    for _ in range(number_of_steps - 1):
        current, previous = current + previous, current
    return current
if __name__ == "__main__":
import doctest
doctest.testmod()
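# Quick check of the recurrence above: 5 steps can be climbed in 8 distinct ways
# (the loop walks (1,1) -> (2,1) -> (3,2) -> (5,3) -> (8,5)).
if __name__ == "__main__":
    print(climb_stairs(5))  # 8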
| 139 |
'''simple docstring'''
import argparse
import math
import os
from copy import deepcopy
import torch
from audio_diffusion.models import DiffusionAttnUnet1D
from diffusion import sampling
from torch import nn
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNet1DModel
MODELS_MAP = {
    "gwf-440k": {
        "url": "https://model-server.zqevans2.workers.dev/gwf-440k.ckpt",
        "sample_rate": 48_000,
        "sample_size": 65_536,
    },
    "jmann-small-190k": {
        "url": "https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt",
        "sample_rate": 48_000,
        "sample_size": 65_536,
    },
    "jmann-large-580k": {
        "url": "https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt",
        "sample_rate": 48_000,
        "sample_size": 131_072,
    },
    "maestro-uncond-150k": {
        "url": "https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt",
        "sample_rate": 16_000,
        "sample_size": 65_536,
    },
    "unlocked-uncond-250k": {
        "url": "https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt",
        "sample_rate": 16_000,
        "sample_size": 65_536,
    },
    "honk-140k": {
        "url": "https://model-server.zqevans2.workers.dev/honk-140k.ckpt",
        "sample_rate": 16_000,
        "sample_size": 65_536,
    },
}
def alpha_sigma_to_t(alpha, sigma):
    return torch.atan2(sigma, alpha) / math.pi * 2


def get_crash_schedule(t):
    sigma = torch.sin(t * math.pi / 2) ** 2
    alpha = (1 - sigma**2) ** 0.5
    return alpha_sigma_to_t(alpha, sigma)
class Object(object):
    pass


class DiffusionUncond(nn.Module):
    def __init__(self, global_args):
        super().__init__()

        self.diffusion = DiffusionAttnUnet1D(global_args, n_attn_layers=4)
        self.diffusion_ema = deepcopy(self.diffusion)
        self.rng = torch.quasirandom.SobolEngine(1, scramble=True)
def download(model_name):
    url = MODELS_MAP[model_name]["url"]
    os.system(f"wget {url} ./")
    return f"./{model_name}.ckpt"
DOWN_NUM_TO_LAYER = {
"1": "resnets.0",
"2": "attentions.0",
"3": "resnets.1",
"4": "attentions.1",
"5": "resnets.2",
"6": "attentions.2",
}
UP_NUM_TO_LAYER = {
"8": "resnets.0",
"9": "attentions.0",
"10": "resnets.1",
"11": "attentions.1",
"12": "resnets.2",
"13": "attentions.2",
}
MID_NUM_TO_LAYER = {
"1": "resnets.0",
"2": "attentions.0",
"3": "resnets.1",
"4": "attentions.1",
"5": "resnets.2",
"6": "attentions.2",
"8": "resnets.3",
"9": "attentions.3",
"10": "resnets.4",
"11": "attentions.4",
"12": "resnets.5",
"13": "attentions.5",
}
DEPTH_0_TO_LAYER = {
"0": "resnets.0",
"1": "resnets.1",
"2": "resnets.2",
"4": "resnets.0",
"5": "resnets.1",
"6": "resnets.2",
}
RES_CONV_MAP = {
"skip": "conv_skip",
"main.0": "conv_1",
"main.1": "group_norm_1",
"main.3": "conv_2",
"main.4": "group_norm_2",
}
ATTN_MAP = {
"norm": "group_norm",
"qkv_proj": ["query", "key", "value"],
"out_proj": ["proj_attn"],
}
def convert_resconv_naming(name):
    if name.startswith("skip"):
        return name.replace("skip", RES_CONV_MAP["skip"])

    # name has to be of format main.{digit}
    if not name.startswith("main."):
        raise ValueError(f"ResConvBlock error with {name}")
    return name.replace(name[:6], RES_CONV_MAP[name[:6]])


def convert_attn_naming(name):
    for key, value in ATTN_MAP.items():
        if name.startswith(key) and not isinstance(value, list):
            return name.replace(key, value)
        elif name.startswith(key):
            return [name.replace(key, v) for v in value]
    raise ValueError(f"Attn error with {name}")
def rename(input_string, max_depth=13):
    string = input_string
    if string.split(".")[0] == "timestep_embed":
        return string.replace("timestep_embed", "time_proj")

    depth = 0
    if string.startswith("net.3."):
        depth += 1
        string = string[6:]
    elif string.startswith("net."):
        string = string[4:]

    while string.startswith("main.7."):
        depth += 1
        string = string[7:]

    if string.startswith("main."):
        string = string[5:]

    # mid block
    if string[:2].isdigit():
        layer_num = string[:2]
        string_left = string[2:]
    else:
        layer_num = string[0]
        string_left = string[1:]

    if depth == max_depth:
        new_layer = MID_NUM_TO_LAYER[layer_num]
        prefix = "mid_block"
    elif depth > 0 and int(layer_num) < 7:
        new_layer = DOWN_NUM_TO_LAYER[layer_num]
        prefix = f"down_blocks.{depth}"
    elif depth > 0 and int(layer_num) > 7:
        new_layer = UP_NUM_TO_LAYER[layer_num]
        prefix = f"up_blocks.{max_depth - depth - 1}"
    elif depth == 0:
        new_layer = DEPTH_0_TO_LAYER[layer_num]
        prefix = f"up_blocks.{max_depth - 1}" if int(layer_num) > 3 else "down_blocks.0"

    if not string_left.startswith("."):
        raise ValueError(f"Naming error with {input_string} and string_left: {string_left}.")

    string_left = string_left[1:]

    if "resnets" in new_layer:
        string_left = convert_resconv_naming(string_left)
    elif "attentions" in new_layer:
        new_string_left = convert_attn_naming(string_left)
        string_left = new_string_left

    if not isinstance(string_left, list):
        new_string = prefix + "." + new_layer + "." + string_left
    else:
        new_string = [prefix + "." + new_layer + "." + s for s in string_left]
    return new_string
def rename_orig_weights(state_dict):
    new_state_dict = {}
    for k, v in state_dict.items():
        if k.endswith("kernel"):
            # up- and downsample layers, don't have trainable weights
            continue
        new_k = rename(k)

        # check if we need to transform from Conv => Linear for attention
        if isinstance(new_k, list):
            new_state_dict = transform_conv_attns(new_state_dict, new_k, v)
        else:
            new_state_dict[new_k] = v

    return new_state_dict


def transform_conv_attns(new_state_dict, new_k, v):
    if len(new_k) == 1:
        if len(v.shape) == 3:
            # weight
            new_state_dict[new_k[0]] = v[:, :, 0]
        else:
            # bias
            new_state_dict[new_k[0]] = v
    else:
        # qkv matrices
        trippled_shape = v.shape[0]
        single_shape = trippled_shape // 3
        for i in range(3):
            if len(v.shape) == 3:
                new_state_dict[new_k[i]] = v[i * single_shape : (i + 1) * single_shape, :, 0]
            else:
                new_state_dict[new_k[i]] = v[i * single_shape : (i + 1) * single_shape]
    return new_state_dict
def main(args):
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    model_name = args.model_path.split("/")[-1].split(".")[0]
    if not os.path.isfile(args.model_path):
        assert (
            model_name == args.model_path
        ), f"Make sure to provide one of the official model names {MODELS_MAP.keys()}"
        args.model_path = download(model_name)

    sample_rate = MODELS_MAP[model_name]["sample_rate"]
    sample_size = MODELS_MAP[model_name]["sample_size"]

    config = Object()
    config.sample_size = sample_size
    config.sample_rate = sample_rate
    config.latent_dim = 0

    diffusers_model = UNet1DModel(sample_size=sample_size, sample_rate=sample_rate)
    diffusers_state_dict = diffusers_model.state_dict()

    orig_model = DiffusionUncond(config)
    orig_model.load_state_dict(torch.load(args.model_path, map_location=device)["state_dict"])
    orig_model = orig_model.diffusion_ema.eval()
    orig_model_state_dict = orig_model.state_dict()
    renamed_state_dict = rename_orig_weights(orig_model_state_dict)

    renamed_minus_diffusers = set(renamed_state_dict.keys()) - set(diffusers_state_dict.keys())
    diffusers_minus_renamed = set(diffusers_state_dict.keys()) - set(renamed_state_dict.keys())

    assert len(renamed_minus_diffusers) == 0, f"Problem with {renamed_minus_diffusers}"
    assert all(k.endswith("kernel") for k in list(diffusers_minus_renamed)), f"Problem with {diffusers_minus_renamed}"

    for key, value in renamed_state_dict.items():
        assert (
            diffusers_state_dict[key].squeeze().shape == value.squeeze().shape
        ), f"Shape for {key} doesn't match. Diffusers: {diffusers_state_dict[key].shape} vs. {value.shape}"
        if key == "time_proj.weight":
            value = value.squeeze()

        diffusers_state_dict[key] = value

    diffusers_model.load_state_dict(diffusers_state_dict)

    steps = 100
    seed = 33

    diffusers_scheduler = IPNDMScheduler(num_train_timesteps=steps)

    generator = torch.manual_seed(seed)
    noise = torch.randn([1, 2, config.sample_size], generator=generator).to(device)

    t = torch.linspace(1, 0, steps + 1, device=device)[:-1]
    step_list = get_crash_schedule(t)

    pipe = DanceDiffusionPipeline(unet=diffusers_model, scheduler=diffusers_scheduler)

    generator = torch.manual_seed(33)
    audio = pipe(num_inference_steps=steps, generator=generator).audios

    generated = sampling.iplms_sample(orig_model, noise, step_list, {})
    generated = generated.clamp(-1, 1)

    diff_sum = (generated - audio).abs().sum()
    diff_max = (generated - audio).abs().max()

    if args.save:
        pipe.save_pretrained(args.checkpoint_path)

    print("Diff sum", diff_sum)
    print("Diff max", diff_max)

    assert diff_max < 1e-3, f"Diff max: {diff_max} is too much :-/"

    print(f"Conversion for {model_name} successful!")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--model_path", default=None, type=str, required=True, help="Path to the model to convert.")
parser.add_argument(
"--save", default=True, type=bool, required=False, help="Whether to save the converted model or not."
)
parser.add_argument("--checkpoint_path", default=None, type=str, required=True, help="Path to the output model.")
    args = parser.parse_args()
main(args)
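# Example invocation of the script above ("gwf-440k" is one of the official names
# in MODELS_MAP; the script file name and output path are placeholders):
#
#   python convert_dance_diffusion_to_diffusers.py \
#       --model_path gwf-440k \
#       --checkpoint_path ./gwf-440k-diffusers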
| 139 | 1 |
import csv
import tweepy
# Twitter API credentials
__UpperCAmelCase : str = ""
__UpperCAmelCase : Optional[Any] = ""
__UpperCAmelCase : Optional[Any] = ""
__UpperCAmelCase : Any = ""
def A__ ( SCREAMING_SNAKE_CASE__) -> None:
# authorize twitter, initialize tweepy
__snake_case: Any = tweepy.OAuthHandler(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__)
auth.set_access_token(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__)
__snake_case: Dict = tweepy.API(SCREAMING_SNAKE_CASE__)
# initialize a list to hold all the tweepy Tweets
__snake_case: Dict = []
# make initial request for most recent tweets (200 is the maximum allowed count)
__snake_case: int = api.user_timeline(screen_name=SCREAMING_SNAKE_CASE__ , count=200)
# save most recent tweets
alltweets.extend(SCREAMING_SNAKE_CASE__)
# save the id of the oldest tweet less one
__snake_case: Optional[Any] = alltweets[-1].id - 1
# keep grabbing tweets until there are no tweets left to grab
while len(SCREAMING_SNAKE_CASE__) > 0:
print(F'''getting tweets before {oldest}''')
# all subsequent requests use the max_id param to prevent duplicates
__snake_case: str = api.user_timeline(
screen_name=SCREAMING_SNAKE_CASE__ , count=200 , max_id=SCREAMING_SNAKE_CASE__)
# save most recent tweets
alltweets.extend(SCREAMING_SNAKE_CASE__)
# update the id of the oldest tweet less one
__snake_case: str = alltweets[-1].id - 1
print(F'''...{len(SCREAMING_SNAKE_CASE__)} tweets downloaded so far''')
# transform the tweepy tweets into a 2D array that will populate the csv
__snake_case: Optional[Any] = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]
# write the csv
with open(F'''new_{screen_name}_tweets.csv''' , """w""") as f:
__snake_case: str = csv.writer(SCREAMING_SNAKE_CASE__)
writer.writerow(["""id""", """created_at""", """text"""])
writer.writerows(SCREAMING_SNAKE_CASE__)
if __name__ == "__main__":
# pass in the username of the account you want to download
get_all_tweets("FirePing32")
| 356 |
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
BertConfig,
BertForMaskedLM,
BertTokenizer,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertTokenizer,
    GPT2Config,
    GPT2LMHeadModel,
    GPT2Tokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
MODEL_CLASSES = {
    "distilbert": (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
    "roberta": (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
    "bert": (BertConfig, BertForMaskedLM, BertTokenizer),
    "gpt2": (GPT2Config, GPT2LMHeadModel, GPT2Tokenizer),
}
def sanity_checks(args):
    """Sanity checks on the combination of command-line arguments."""
    assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
    assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
    if args.mlm:
        assert os.path.isfile(args.token_counts)
        assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
    else:
        assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])

    assert args.teacher_type == args.student_type or (
        args.student_type == "distilbert" and args.teacher_type == "bert"
    )
    assert os.path.isfile(args.student_config)
    if args.student_pretrained_weights is not None:
        assert os.path.isfile(args.student_pretrained_weights)

    if args.freeze_token_type_embds:
        assert args.student_type in ["roberta"]

    assert args.alpha_ce >= 0.0
    assert args.alpha_mlm >= 0.0
    assert args.alpha_clm >= 0.0
    assert args.alpha_mse >= 0.0
    assert args.alpha_cos >= 0.0
    assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
def freeze_pos_embeddings(student, args):
    if args.student_type == "roberta":
        student.roberta.embeddings.position_embeddings.weight.requires_grad = False
    elif args.student_type == "gpt2":
        student.transformer.wpe.weight.requires_grad = False


def freeze_token_type_embeddings(student, args):
    if args.student_type == "roberta":
        student.roberta.embeddings.token_type_embeddings.weight.requires_grad = False
def main():
    parser = argparse.ArgumentParser(description="Training")
    parser.add_argument("--force", action="store_true", help="Overwrite dump_path if it already exists.")
    parser.add_argument(
        "--dump_path", type=str, required=True, help="The output directory (log, checkpoints, parameters, etc.)"
    )
    parser.add_argument(
        "--data_file",
        type=str,
        required=True,
        help="The binarized file (tokenized + tokens_to_ids) and grouped by sequence.",
    )
    parser.add_argument(
        "--student_type",
        type=str,
        choices=["distilbert", "roberta", "gpt2"],
        required=True,
        help="The student type (DistilBERT, RoBERTa).",
    )
    parser.add_argument("--student_config", type=str, required=True, help="Path to the student configuration.")
    parser.add_argument(
        "--student_pretrained_weights", default=None, type=str, help="Load student initialization checkpoint."
    )
    parser.add_argument(
        "--teacher_type", choices=["bert", "roberta", "gpt2"], required=True, help="Teacher type (BERT, RoBERTa)."
    )
    parser.add_argument("--teacher_name", type=str, required=True, help="The teacher model.")
    parser.add_argument("--temperature", default=2.0, type=float, help="Temperature for the softmax temperature.")
    parser.add_argument(
        "--alpha_ce", default=0.5, type=float, help="Linear weight for the distillation loss. Must be >=0."
    )
    parser.add_argument(
        "--alpha_mlm",
        default=0.0,
        type=float,
        help="Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.",
    )
    parser.add_argument("--alpha_clm", default=0.5, type=float, help="Linear weight for the CLM loss. Must be >=0.")
    parser.add_argument("--alpha_mse", default=0.0, type=float, help="Linear weight of the MSE loss. Must be >=0.")
    parser.add_argument(
        "--alpha_cos", default=0.0, type=float, help="Linear weight of the cosine embedding loss. Must be >=0."
    )
    parser.add_argument(
        "--mlm", action="store_true", help="The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM."
    )
    parser.add_argument(
        "--mlm_mask_prop",
        default=0.15,
        type=float,
        help="Proportion of tokens for which we need to make a prediction.",
    )
    parser.add_argument("--word_mask", default=0.8, type=float, help="Proportion of tokens to mask out.")
    parser.add_argument("--word_keep", default=0.1, type=float, help="Proportion of tokens to keep.")
    parser.add_argument("--word_rand", default=0.1, type=float, help="Proportion of tokens to randomly replace.")
    parser.add_argument(
        "--mlm_smoothing",
        default=0.7,
        type=float,
        help="Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).",
    )
    parser.add_argument("--token_counts", type=str, help="The token counts in the data_file for MLM.")
    parser.add_argument(
        "--restrict_ce_to_mask",
        action="store_true",
        help="If true, compute the distillation loss only the [MLM] prediction distribution.",
    )
    parser.add_argument(
        "--freeze_pos_embs",
        action="store_true",
        help="Freeze positional embeddings during distillation. For student_type in ['roberta', 'gpt2'] only.",
    )
    parser.add_argument(
        "--freeze_token_type_embds",
        action="store_true",
        help="Freeze token type embeddings during distillation if existent. For student_type in ['roberta'] only.",
    )
    parser.add_argument("--n_epoch", type=int, default=3, help="Number of pass on the whole dataset.")
    parser.add_argument("--batch_size", type=int, default=5, help="Batch size (for each process).")
    parser.add_argument(
        "--group_by_size",
        action="store_false",
        help="If true, group sequences that have similar length into the same batch. Default is true.",
    )
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=50,
        help="Gradient accumulation for larger training batches.",
    )
    parser.add_argument("--warmup_prop", default=0.05, type=float, help="Linear warmup proportion.")
    parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.")
    parser.add_argument("--learning_rate", default=5e-4, type=float, help="The initial learning rate for Adam.")
    parser.add_argument("--adam_epsilon", default=1e-6, type=float, help="Epsilon for Adam optimizer.")
    parser.add_argument("--max_grad_norm", default=5.0, type=float, help="Max gradient norm.")
    parser.add_argument("--initializer_range", default=0.02, type=float, help="Random initialization range.")
    parser.add_argument(
        "--fp16",
        action="store_true",
        help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit",
    )
    parser.add_argument(
        "--fp16_opt_level",
        type=str,
        default="O1",
        help=(
            "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
            "See details at https://nvidia.github.io/apex/amp.html"
        ),
    )
    parser.add_argument("--n_gpu", type=int, default=1, help="Number of GPUs in the node.")
    parser.add_argument("--local_rank", type=int, default=-1, help="Distributed training - Local rank")
    parser.add_argument("--seed", type=int, default=56, help="Random seed")
    parser.add_argument("--log_interval", type=int, default=500, help="Tensorboard logging interval.")
    parser.add_argument("--checkpoint_interval", type=int, default=4000, help="Checkpoint interval.")
    args = parser.parse_args()
    sanity_checks(args)

    # ARGS #
    init_gpu_params(args)
    set_seed(args)
    if args.is_master:
        if os.path.exists(args.dump_path):
            if not args.force:
                raise ValueError(
                    f"Serialization dir {args.dump_path} already exists, but you have not specified whether to"
                    " overwrite it. Use `--force` if you want to overwrite it."
                )
            else:
                shutil.rmtree(args.dump_path)

        if not os.path.exists(args.dump_path):
            os.makedirs(args.dump_path)
        logger.info(f"Experiment will be dumped and logged in {args.dump_path}")

        # SAVE PARAMS #
        logger.info(f"Param: {args}")
        with open(os.path.join(args.dump_path, "parameters.json"), "w") as f:
            json.dump(vars(args), f, indent=4)
        git_log(args.dump_path)

    student_config_class, student_model_class, _ = MODEL_CLASSES[args.student_type]
    teacher_config_class, teacher_model_class, teacher_tokenizer_class = MODEL_CLASSES[args.teacher_type]

    # TOKENIZER #
    tokenizer = teacher_tokenizer_class.from_pretrained(args.teacher_name)
    special_tok_ids = {}
    for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
        idx = tokenizer.all_special_tokens.index(tok_symbol)
        special_tok_ids[tok_name] = tokenizer.all_special_ids[idx]
    logger.info(f"Special tokens {special_tok_ids}")
    args.special_tok_ids = special_tok_ids
    args.max_model_input_size = tokenizer.max_model_input_sizes[args.teacher_name]

    # DATA LOADER #
    logger.info(f"Loading data from {args.data_file}")
    with open(args.data_file, "rb") as fp:
        data = pickle.load(fp)

    if args.mlm:
        logger.info(f"Loading token counts from {args.token_counts} (already pre-computed)")
        with open(args.token_counts, "rb") as fp:
            counts = pickle.load(fp)

        token_probs = np.maximum(counts, 1) ** -args.mlm_smoothing
        for idx in special_tok_ids.values():
            token_probs[idx] = 0.0  # do not predict special tokens
        token_probs = torch.from_numpy(token_probs)
    else:
        token_probs = None

    train_lm_seq_dataset = LmSeqsDataset(params=args, data=data)
    logger.info("Data loader created.")

    # STUDENT #
    logger.info(f"Loading student config from {args.student_config}")
    stu_architecture_config = student_config_class.from_pretrained(args.student_config)
    stu_architecture_config.output_hidden_states = True

    if args.student_pretrained_weights is not None:
        logger.info(f"Loading pretrained weights from {args.student_pretrained_weights}")
        student = student_model_class.from_pretrained(args.student_pretrained_weights, config=stu_architecture_config)
    else:
        student = student_model_class(stu_architecture_config)

    if args.n_gpu > 0:
        student.to(f"cuda:{args.local_rank}")
    logger.info("Student loaded.")

    # TEACHER #
    teacher = teacher_model_class.from_pretrained(args.teacher_name, output_hidden_states=True)
    if args.n_gpu > 0:
        teacher.to(f"cuda:{args.local_rank}")
    logger.info(f"Teacher loaded from {args.teacher_name}.")

    # FREEZING #
    if args.freeze_pos_embs:
        freeze_pos_embeddings(student, args)
    if args.freeze_token_type_embds:
        freeze_token_type_embeddings(student, args)

    # SANITY CHECKS #
    assert student.config.vocab_size == teacher.config.vocab_size
    assert student.config.hidden_size == teacher.config.hidden_size
    assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
    if args.mlm:
        assert token_probs.size(0) == stu_architecture_config.vocab_size

    # DISTILLER #
    torch.cuda.empty_cache()
    distiller = Distiller(
        params=args, dataset=train_lm_seq_dataset, token_probs=token_probs, student=student, teacher=teacher
    )
    distiller.train()
    logger.info("Let's go get some drinks.")
if __name__ == "__main__":
main()
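# Example launch with a minimal, mutually consistent set of flags (paths are
# placeholders; `--alpha_clm 0.0` is required because the MLM losses are enabled):
#
#   python train.py \
#       --student_type distilbert --student_config training_configs/distilbert-base-uncased.json \
#       --teacher_type bert --teacher_name bert-base-uncased \
#       --mlm --alpha_ce 5.0 --alpha_mlm 2.0 --alpha_clm 0.0 --alpha_cos 1.0 \
#       --dump_path serialization_dir/my_first_distillation \
#       --data_file data/binarized_text.bert-base-uncased.pickle \
#       --token_counts data/token_counts.bert-base-uncased.pickle \
#       --force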
| 293 | 0 |
'''simple docstring'''
from collections import defaultdict
from math import gcd
def solution(limit: int = 1_500_000) -> int:
    """Count perimeters below `limit` that admit exactly one integer right triangle.

    Uses Euclid's formula: for coprime m > n with m - n odd, the primitive triple
    (m^2 - n^2, 2mn, m^2 + n^2) has perimeter 2m(m + n).
    """
    frequencies = defaultdict(int)
    euclid_m = 2
    while 2 * euclid_m * (euclid_m + 1) <= limit:
        for euclid_n in range((euclid_m % 2) + 1, euclid_m, 2):
            if gcd(euclid_m, euclid_n) > 1:
                continue
            primitive_perimeter = 2 * euclid_m * (euclid_m + euclid_n)
            for perimeter in range(primitive_perimeter, limit + 1, primitive_perimeter):
                frequencies[perimeter] += 1
        euclid_m += 1
    return sum(1 for frequency in frequencies.values() if frequency == 1)
if __name__ == "__main__":
print(F"""{solution() = }""")
| 83 |
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
    from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class MT5IntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        model = AutoModelForSeq2SeqLM.from_pretrained("google/mt5-small", return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="pt").input_ids
        labels = tokenizer("Hi I am", return_tensors="pt").input_ids

        loss = model(input_ids.to(torch_device), labels=labels.to(torch_device)).loss
        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
| 174 | 0 |
"""simple docstring"""
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPT2Config, GPT2LMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class UniDiffuserTextDecoder(ModelMixin, ConfigMixin, ModuleUtilsMixin):
    _keys_to_ignore_on_load_unexpected = [r"h\.\d+\.attn\.bias", r"h\.\d+\.attn\.masked_bias"]

    @register_to_config
    def __init__(
        self,
        prefix_length: int,
        prefix_inner_dim: int,
        prefix_hidden_dim: Optional[int] = None,
        vocab_size: int = 50257,
        n_positions: int = 1024,
        n_embd: int = 768,
        n_layer: int = 12,
        n_head: int = 12,
        n_inner: Optional[int] = None,
        activation_function: str = "gelu_new",
        resid_pdrop: float = 0.1,
        embd_pdrop: float = 0.1,
        attn_pdrop: float = 0.1,
        layer_norm_epsilon: float = 1e-5,
        initializer_range: float = 0.02,
        scale_attn_weights: bool = True,
        use_cache: bool = True,
        scale_attn_by_inverse_layer_idx: bool = False,
        reorder_and_upcast_attn: bool = False,
    ):
        super().__init__()

        self.prefix_length = prefix_length

        if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
            raise ValueError(
                f"`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_inner_dim} and"
                f" `n_embd`: {n_embd} are not equal."
            )

        self.prefix_inner_dim = prefix_inner_dim
        self.prefix_hidden_dim = prefix_hidden_dim

        self.encode_prefix = (
            nn.Linear(self.prefix_inner_dim, self.prefix_hidden_dim)
            if self.prefix_hidden_dim is not None
            else nn.Identity()
        )
        self.decode_prefix = (
            nn.Linear(self.prefix_hidden_dim, n_embd) if self.prefix_hidden_dim is not None else nn.Identity()
        )

        gpt_config = GPT2Config(
            vocab_size=vocab_size,
            n_positions=n_positions,
            n_embd=n_embd,
            n_layer=n_layer,
            n_head=n_head,
            n_inner=n_inner,
            activation_function=activation_function,
            resid_pdrop=resid_pdrop,
            embd_pdrop=embd_pdrop,
            attn_pdrop=attn_pdrop,
            layer_norm_epsilon=layer_norm_epsilon,
            initializer_range=initializer_range,
            scale_attn_weights=scale_attn_weights,
            use_cache=use_cache,
            scale_attn_by_inverse_layer_idx=scale_attn_by_inverse_layer_idx,
            reorder_and_upcast_attn=reorder_and_upcast_attn,
        )
        self.transformer = GPT2LMHeadModel(gpt_config)
    def forward(
        self,
        input_ids: torch.Tensor,
        prefix_embeds: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        labels: Optional[torch.Tensor] = None,
    ):
        embedding_text = self.transformer.transformer.wte(input_ids)
        hidden = self.encode_prefix(prefix_embeds)
        prefix_embeds = self.decode_prefix(hidden)
        embedding_cat = torch.cat((prefix_embeds, embedding_text), dim=1)

        if labels is not None:
            dummy_token = self.get_dummy_token(input_ids.shape[0], input_ids.device)
            labels = torch.cat((dummy_token, input_ids), dim=1)
        out = self.transformer(inputs_embeds=embedding_cat, labels=labels, attention_mask=attention_mask)
        if self.prefix_hidden_dim is not None:
            return out, hidden
        else:
            return out

    def get_dummy_token(self, batch_size: int, device: torch.device) -> torch.Tensor:
        return torch.zeros(batch_size, self.prefix_length, dtype=torch.int64, device=device)

    def encode(self, prefix):
        return self.encode_prefix(prefix)
    @torch.no_grad()
    def generate_captions(self, features, eos_token_id, device):
        features = torch.split(features, 1, dim=0)
        generated_tokens = []
        generated_seq_lengths = []
        for feature in features:
            feature = self.decode_prefix(feature.to(device))  # back to the clip feature
            # Only support beam search for now
            output_tokens, seq_lengths = self.generate_beam(
                input_embeds=feature, device=device, eos_token_id=eos_token_id
            )
            generated_tokens.append(output_tokens[0])
            generated_seq_lengths.append(seq_lengths[0])
        generated_tokens = torch.stack(generated_tokens)
        generated_seq_lengths = torch.stack(generated_seq_lengths)
        return generated_tokens, generated_seq_lengths
    @torch.no_grad()
    def generate_beam(
        self,
        input_ids=None,
        input_embeds=None,
        device=None,
        beam_size: int = 5,
        entry_length: int = 67,
        temperature: float = 1.0,
        eos_token_id: Optional[int] = None,
    ):
        stop_token_index = eos_token_id
        tokens = None
        scores = None
        seq_lengths = torch.ones(beam_size, device=device, dtype=torch.int)
        is_stopped = torch.zeros(beam_size, device=device, dtype=torch.bool)

        if input_embeds is not None:
            generated = input_embeds
        else:
            generated = self.transformer.transformer.wte(input_ids)

        for i in range(entry_length):
            outputs = self.transformer(inputs_embeds=generated)
            logits = outputs.logits
            logits = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
            logits = logits.softmax(-1).log()

            if scores is None:
                scores, next_tokens = logits.topk(beam_size, -1)
                generated = generated.expand(beam_size, *generated.shape[1:])
                next_tokens, scores = next_tokens.permute(1, 0), scores.squeeze(0)
                if tokens is None:
                    tokens = next_tokens
                else:
                    tokens = tokens.expand(beam_size, *tokens.shape[1:])
                    tokens = torch.cat((tokens, next_tokens), dim=1)
            else:
                logits[is_stopped] = -float(np.inf)
                logits[is_stopped, 0] = 0
                scores_sum = scores[:, None] + logits
                seq_lengths[~is_stopped] += 1
                scores_sum_average = scores_sum / seq_lengths[:, None]
                scores_sum_average, next_tokens = scores_sum_average.view(-1).topk(beam_size, -1)
                next_tokens_source = next_tokens // scores_sum.shape[1]
                seq_lengths = seq_lengths[next_tokens_source]
                next_tokens = next_tokens % scores_sum.shape[1]
                next_tokens = next_tokens.unsqueeze(1)
                tokens = tokens[next_tokens_source]
                tokens = torch.cat((tokens, next_tokens), dim=1)
                generated = generated[next_tokens_source]
                scores = scores_sum_average * seq_lengths
                is_stopped = is_stopped[next_tokens_source]

            next_token_embed = self.transformer.transformer.wte(next_tokens.squeeze()).view(generated.shape[0], 1, -1)
            generated = torch.cat((generated, next_token_embed), dim=1)
            is_stopped = is_stopped + next_tokens.eq(stop_token_index).squeeze()
            if is_stopped.all():
                break

        scores = scores / seq_lengths
        order = scores.argsort(descending=True)
        # tokens tensors are already padded to max_seq_length
        output_texts = [tokens[i] for i in order]
        output_texts = torch.stack(output_texts, dim=0)
        seq_lengths = torch.tensor([seq_lengths[i] for i in order], dtype=seq_lengths.dtype)
        return output_texts, seq_lengths
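# A small shape sketch for the decoder above (all sizes are hypothetical): feed an
# 8-token caption plus a 4-token, 64-dim prefix through a tiny model and inspect
# the LM logits.
decoder = UniDiffuserTextDecoder(
    prefix_length=4, prefix_inner_dim=64, prefix_hidden_dim=32, n_embd=64, n_layer=2, n_head=2
)
input_ids = torch.randint(0, decoder.config.vocab_size, (1, 8))
prefix = torch.randn(1, 4, 64)
out, hidden = decoder(input_ids, prefix)
print(out.logits.shape)  # (1, 4 + 8, vocab_size)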
| 86 |
"""simple docstring"""
from binascii import hexlify
from hashlib import sha256
from os import urandom
# RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for
# Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526
primes = {
# 1536-bit
5: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF',
base=16,
),
'generator': 2,
},
# 2048-bit
14: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'
+ 'E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'
+ 'DE2BCBF6955817183995497CEA956AE515D2261898FA0510'
+ '15728E5A8AACAA68FFFFFFFFFFFFFFFF',
base=16,
),
'generator': 2,
},
# 3072-bit
15: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'
+ 'E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'
+ 'DE2BCBF6955817183995497CEA956AE515D2261898FA0510'
+ '15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'
+ 'ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'
+ 'ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'
+ 'F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'
+ 'BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'
+ '43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF',
base=16,
),
'generator': 2,
},
# 4096-bit
16: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'
+ 'E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'
+ 'DE2BCBF6955817183995497CEA956AE515D2261898FA0510'
+ '15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'
+ 'ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'
+ 'ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'
+ 'F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'
+ 'BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'
+ '43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7'
+ '88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA'
+ '2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6'
+ '287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED'
+ '1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9'
+ '93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199'
+ 'FFFFFFFFFFFFFFFF',
base=16,
),
'generator': 2,
},
# 6144-bit
17: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08'
+ '8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B'
+ '302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9'
+ 'A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6'
+ '49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8'
+ 'FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C'
+ '180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718'
+ '3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D'
+ '04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D'
+ 'B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226'
+ '1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C'
+ 'BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC'
+ 'E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26'
+ '99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB'
+ '04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2'
+ '233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127'
+ 'D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492'
+ '36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406'
+ 'AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918'
+ 'DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151'
+ '2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03'
+ 'F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F'
+ 'BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA'
+ 'CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B'
+ 'B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632'
+ '387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E'
+ '6DCC4024FFFFFFFFFFFFFFFF',
base=16,
),
'generator': 2,
},
# 8192-bit
18: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'
+ 'E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'
+ 'DE2BCBF6955817183995497CEA956AE515D2261898FA0510'
+ '15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'
+ 'ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'
+ 'ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'
+ 'F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'
+ 'BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'
+ '43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7'
+ '88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA'
+ '2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6'
+ '287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED'
+ '1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9'
+ '93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492'
+ '36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD'
+ 'F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831'
+ '179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B'
+ 'DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF'
+ '5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6'
+ 'D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3'
+ '23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA'
+ 'CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328'
+ '06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C'
+ 'DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE'
+ '12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4'
+ '38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300'
+ '741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568'
+ '3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9'
+ '22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B'
+ '4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A'
+ '062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36'
+ '4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1'
+ 'B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92'
+ '4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47'
+ '9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71'
+ '60C980DD98EDD3DFFFFFFFFFFFFFFFFF',
base=16,
),
'generator': 2,
},
}
class DiffieHellman:
    """Class to represent the Diffie-Hellman key exchange protocol."""

    def __init__(self, group: int = 14) -> None:
        if group not in primes:
            raise ValueError("Unsupported Group")
        self.prime = primes[group]["prime"]
        self.generator = primes[group]["generator"]

        self.__private_key = int(hexlify(urandom(32)), base=16)

    def get_private_key(self) -> str:
        return hex(self.__private_key)[2:]

    def generate_public_key(self) -> str:
        public_key = pow(self.generator, self.__private_key, self.prime)
        return hex(public_key)[2:]

    def is_valid_public_key(self, key: int) -> bool:
        # check if the other public key is valid based on NIST SP800-56
        return (
            2 <= key <= self.prime - 2
            and pow(key, (self.prime - 1) // 2, self.prime) == 1
        )

    def generate_shared_key(self, other_key_str: str) -> str:
        other_key = int(other_key_str, base=16)
        if not self.is_valid_public_key(other_key):
            raise ValueError("Invalid public key")
        shared_key = pow(other_key, self.__private_key, self.prime)
        return sha256(str(shared_key).encode()).hexdigest()

    @staticmethod
    def is_valid_public_key_static(remote_public_key_str: int, prime: int) -> bool:
        # check if the other public key is valid based on NIST SP800-56
        return (
            2 <= remote_public_key_str <= prime - 2
            and pow(remote_public_key_str, (prime - 1) // 2, prime) == 1
        )

    @staticmethod
    def generate_shared_key_static(local_private_key_str: str, remote_public_key_str: str, group: int = 14) -> str:
        local_private_key = int(local_private_key_str, base=16)
        remote_public_key = int(remote_public_key_str, base=16)
        prime = primes[group]["prime"]
        if not DiffieHellman.is_valid_public_key_static(remote_public_key, prime):
            raise ValueError("Invalid public key")
        shared_key = pow(remote_public_key, local_private_key, prime)
        return sha256(str(shared_key).encode()).hexdigest()
if __name__ == "__main__":
import doctest
doctest.testmod()
| 86 | 1 |
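A short usage sketch of the DiffieHellman class above (party names are illustrative): both sides derive the same shared secret from each other's public keys.

alice = DiffieHellman(group=14)
bob = DiffieHellman(group=14)

alice_public = alice.generate_public_key()
bob_public = bob.generate_public_key()

# Each side combines its own private key with the other's public key.
assert alice.generate_shared_key(bob_public) == bob.generate_shared_key(alice_public)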
import os
from collections import deque
import torch
from torch.utils.data import Dataset
class CNNDMDataset(Dataset):
    def __init__(self, path: str = "", prefix: str = "train"):
        assert os.path.isdir(path)
        self.documents = []
        story_filenames_list = os.listdir(path)
        for story_filename in story_filenames_list:
            if "summary" in story_filename:
                continue
            path_to_story = os.path.join(path, story_filename)
            if not os.path.isfile(path_to_story):
                continue
            self.documents.append(path_to_story)

    def __len__(self):
        return len(self.documents)

    def __getitem__(self, idx):
        document_path = self.documents[idx]
        document_name = document_path.split("/")[-1]
        with open(document_path, encoding="utf-8") as source:
            raw_story = source.read()
            story_lines, summary_lines = process_story(raw_story)
        return document_name, story_lines, summary_lines


def process_story(raw_story):
    nonempty_lines = list(filter(lambda x: len(x) != 0, [line.strip() for line in raw_story.split("\n")]))
    # for some unknown reason some lines miss a period, add it
    nonempty_lines = [_add_missing_period(line) for line in nonempty_lines]
    # gather article lines
    story_lines = []
    lines = deque(nonempty_lines)
    while True:
        try:
            element = lines.popleft()
            if element.startswith("@highlight"):
                break
            story_lines.append(element)
        except IndexError:
            # if "@highlight" is absent from the file we pop
            # all elements until there is None, raising an exception.
            return story_lines, []
    # gather summary lines
    summary_lines = list(filter(lambda t: not t.startswith("@highlight"), lines))
    return story_lines, summary_lines


def _add_missing_period(line):
    END_TOKENS = [".", "!", "?", "...", "'", "`", '"', "\u2019", "\u201d", ")"]
    if line.startswith("@highlight"):
        return line
    if line[-1] in END_TOKENS:
        return line
    return line + "."


def fit_to_block_size(sequence, block_size, pad_token_id):
    if len(sequence) > block_size:
        return sequence[:block_size]
    else:
        sequence.extend([pad_token_id] * (block_size - len(sequence)))
        return sequence


def build_mask(sequence, pad_token_id):
    mask = torch.ones_like(sequence)
    idx_pad_tokens = sequence == pad_token_id
    mask[idx_pad_tokens] = 0
    return mask


def encode_for_summarization(story_lines, summary_lines, tokenizer):
    story_lines_token_ids = [tokenizer.encode(line) for line in story_lines]
    story_token_ids = [token for sentence in story_lines_token_ids for token in sentence]
    summary_lines_token_ids = [tokenizer.encode(line) for line in summary_lines]
    summary_token_ids = [token for sentence in summary_lines_token_ids for token in sentence]
    return story_token_ids, summary_token_ids


def compute_token_type_ids(batch, separator_token_id):
    batch_embeddings = []
    for sequence in batch:
        sentence_num = -1
        embeddings = []
        for s in sequence:
            if s == separator_token_id:
                sentence_num += 1
            embeddings.append(sentence_num % 2)
        batch_embeddings.append(embeddings)
    return torch.tensor(batch_embeddings)
| 51 |
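A minimal usage sketch of process_story above on a CNN/DailyMail-style story string, where "@highlight" markers separate the article from its summary lines.

raw = "The first sentence of the article\n\nA second sentence\n\n@highlight\n\nThe one-line summary"
story, summary = process_story(raw)
print(story)    # ['The first sentence of the article.', 'A second sentence.']
print(summary)  # ['The one-line summary.']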
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_clap": [
"CLAP_PRETRAINED_MODEL_ARCHIVE_LIST",
"ClapAudioConfig",
"ClapConfig",
"ClapTextConfig",
],
"processing_clap": ["ClapProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clap"] = [
"CLAP_PRETRAINED_MODEL_ARCHIVE_LIST",
"ClapModel",
"ClapPreTrainedModel",
"ClapTextModel",
"ClapTextModelWithProjection",
"ClapAudioModel",
"ClapAudioModelWithProjection",
]
    _import_structure["feature_extraction_clap"] = ["ClapFeatureExtractor"]
if TYPE_CHECKING:
from .configuration_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioConfig,
ClapConfig,
ClapTextConfig,
)
from .processing_clap import ClapProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clap import ClapFeatureExtractor
from .modeling_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioModel,
ClapAudioModelWithProjection,
ClapModel,
ClapPreTrainedModel,
ClapTextModel,
ClapTextModelWithProjection,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 342 | 0 |
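The block above defers importing the heavy torch-backed modules until a symbol is first requested. A minimal standalone sketch of the same deferred-import idea, using PEP 562's module-level __getattr__ (the package and symbol names here are hypothetical, not part of the library):

# lazy_pkg/__init__.py (hypothetical package)
import importlib

_import_structure = {"heavy_module": ["expensive_function"]}

def __getattr__(name):
    # PEP 562: invoked only when `name` is not found in the module namespace
    for module_name, symbols in _import_structure.items():
        if name in symbols:
            module = importlib.import_module(f".{module_name}", __name__)
            return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")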
import json
import os
import shutil
import tempfile
from unittest import TestCase
from transformers import BartTokenizer, BartTokenizerFast, DPRQuestionEncoderTokenizer, DPRQuestionEncoderTokenizerFast
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_tokenizers, require_torch, slow
from transformers.utils import is_datasets_available, is_faiss_available, is_torch_available
if is_torch_available() and is_datasets_available() and is_faiss_available():
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.tokenization_rag import RagTokenizer
@require_faiss
@require_torch
class RagTokenizerTest(TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        self.retrieval_vector_size = 8
        # DPR tok
        vocab_tokens = [
            "[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]",
            "want", "##want", "##ed", "wa", "un", "runn", "##ing",
            ",", "low", "lowest",
        ]
        dpr_tokenizer_path = os.path.join(self.tmpdirname, "dpr_tokenizer")
        os.makedirs(dpr_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(dpr_tokenizer_path, DPR_VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
        # BART tok
        vocab = [
            "l", "o", "w", "e", "r", "s", "t", "i", "d", "n",
            "\u0120", "\u0120l", "\u0120n", "\u0120lo", "\u0120low",
            "er", "\u0120lowest", "\u0120newer", "\u0120wider", "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}
        bart_tokenizer_path = os.path.join(self.tmpdirname, "bart_tokenizer")
        os.makedirs(bart_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_dpr_tokenizer(self) -> DPRQuestionEncoderTokenizer:
        return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer"))

    def get_bart_tokenizer(self) -> BartTokenizer:
        return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname, "bart_tokenizer"))

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    @require_tokenizers
    def test_save_load_pretrained_with_saved_config(self):
        save_dir = os.path.join(self.tmpdirname, "rag_tokenizer")
        rag_config = RagConfig(question_encoder=DPRConfig().to_dict(), generator=BartConfig().to_dict())
        rag_tokenizer = RagTokenizer(question_encoder=self.get_dpr_tokenizer(), generator=self.get_bart_tokenizer())
        rag_config.save_pretrained(save_dir)
        rag_tokenizer.save_pretrained(save_dir)
        new_rag_tokenizer = RagTokenizer.from_pretrained(save_dir, config=rag_config)
        self.assertIsInstance(new_rag_tokenizer.question_encoder, DPRQuestionEncoderTokenizerFast)
        self.assertEqual(new_rag_tokenizer.question_encoder.get_vocab(), rag_tokenizer.question_encoder.get_vocab())
        self.assertIsInstance(new_rag_tokenizer.generator, BartTokenizerFast)
        self.assertEqual(new_rag_tokenizer.generator.get_vocab(), rag_tokenizer.generator.get_vocab())

    @slow
    def test_pretrained_token_nq_tokenizer(self):
        tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-nq")
        input_strings = [
            "who got the first nobel prize in physics",
            "when is the next deadpool movie being released",
            "which mode is used for short wave broadcast service",
            "who is the owner of reading football club",
            "when is the next scandal episode coming out",
            "when is the last time the philadelphia won the superbowl",
            "what is the most current adobe flash player version",
            "how many episodes are there in dragon ball z",
            "what is the first step in the evolution of the eye",
            "where is gall bladder situated in human body",
            "what is the main mineral in lithium batteries",
            "who is the president of usa right now",
            "where do the greasers live in the outsiders",
            "panda is a national animal of which country",
            "what is the name of manchester united stadium",
        ]
        input_dict = tokenizer(input_strings)
        self.assertIsNotNone(input_dict)

    @slow
    def test_pretrained_sequence_nq_tokenizer(self):
        tokenizer = RagTokenizer.from_pretrained("facebook/rag-sequence-nq")
        input_strings = [
            "who got the first nobel prize in physics",
            "when is the next deadpool movie being released",
            "which mode is used for short wave broadcast service",
            "who is the owner of reading football club",
            "when is the next scandal episode coming out",
            "when is the last time the philadelphia won the superbowl",
            "what is the most current adobe flash player version",
            "how many episodes are there in dragon ball z",
            "what is the first step in the evolution of the eye",
            "where is gall bladder situated in human body",
            "what is the main mineral in lithium batteries",
            "who is the president of usa right now",
            "where do the greasers live in the outsiders",
            "panda is a national animal of which country",
            "what is the name of manchester united stadium",
        ]
        input_dict = tokenizer(input_strings)
        self.assertIsNotNone(input_dict)
| 251 |
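A tiny usage sketch of what the slow tests above exercise (network access and the facebook/rag-token-nq checkpoint named in the test are assumed):

from transformers import RagTokenizer

tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-nq")
batch = tokenizer(["who got the first nobel prize in physics"])
print(batch["input_ids"])  # token ids produced by the question-encoder tokenizer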
from __future__ import annotations
import unittest
from transformers import BlenderbotConfig, BlenderbotTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFBlenderbotForConditionalGeneration, TFBlenderbotModel
@require_tf
class TFBlenderbotModelTester:
    config_cls = BlenderbotConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFBlenderbotModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)


def prepare_blenderbot_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }


@require_tf
class TFBlenderbotModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFBlenderbotForConditionalGeneration, TFBlenderbotModel) if is_tf_available() else ()
    all_generative_model_classes = (TFBlenderbotForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFBlenderbotForConditionalGeneration,
            "feature-extraction": TFBlenderbotModel,
            "summarization": TFBlenderbotForConditionalGeneration,
            "text2text-generation": TFBlenderbotForConditionalGeneration,
            "translation": TFBlenderbotForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFBlenderbotModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlenderbotConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)


@require_tokenizers
@require_tf
class TFBlenderbot400MIntegrationTests(unittest.TestCase):
    src_text = ["My friends are cool but they eat too many carbs."]
    model_name = "facebook/blenderbot-400M-distill"

    @cached_property
    def tokenizer(self):
        return BlenderbotTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    @slow
    def test_generation_from_long_input(self):
        model_inputs = self.tokenizer(self.src_text, return_tensors="tf")
        generated_ids = self.model.generate(model_inputs.input_ids)
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)[0]
        assert (
            generated_words
            == " That's unfortunate. Are they trying to lose weight or are they just trying to be healthier?"
        )
| 251 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_distilbert": [
"DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"DistilBertConfig",
"DistilBertOnnxConfig",
],
"tokenization_distilbert": ["DistilBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_distilbert_fast"] = ["DistilBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_distilbert"] = [
"DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"DistilBertForMaskedLM",
"DistilBertForMultipleChoice",
"DistilBertForQuestionAnswering",
"DistilBertForSequenceClassification",
"DistilBertForTokenClassification",
"DistilBertModel",
"DistilBertPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_distilbert"] = [
"TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFDistilBertForMaskedLM",
"TFDistilBertForMultipleChoice",
"TFDistilBertForQuestionAnswering",
"TFDistilBertForSequenceClassification",
"TFDistilBertForTokenClassification",
"TFDistilBertMainLayer",
"TFDistilBertModel",
"TFDistilBertPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_distilbert"] = [
"FlaxDistilBertForMaskedLM",
"FlaxDistilBertForMultipleChoice",
"FlaxDistilBertForQuestionAnswering",
"FlaxDistilBertForSequenceClassification",
"FlaxDistilBertForTokenClassification",
"FlaxDistilBertModel",
"FlaxDistilBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_distilbert import (
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DistilBertConfig,
DistilBertOnnxConfig,
)
from .tokenization_distilbert import DistilBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_distilbert_fast import DistilBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_distilbert import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
DistilBertPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertMainLayer,
TFDistilBertModel,
TFDistilBertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
FlaxDistilBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 10 |
import torch
from diffusers import DDPMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class DDPMParallelSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDPMParallelScheduler,)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1_000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "variance_type": "fixed_small",
            "clip_sample": True,
        }
        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1_000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_variance_type(self):
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [0, 500, 999]:
            self.check_over_forward(time_step=t)

    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1e-5

    def test_batch_step_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample1 = self.dummy_sample_deter
        sample2 = self.dummy_sample_deter + 0.1
        sample3 = self.dummy_sample_deter - 0.1
        per_sample_batch = sample1.shape[0]
        samples = torch.stack([sample1, sample2, sample3], dim=0)
        timesteps = torch.arange(num_trained_timesteps)[0:3, None].repeat(1, per_sample_batch)

        residual = model(samples.flatten(0, 1), timesteps.flatten(0, 1))
        pred_prev_sample = scheduler.batch_step_no_noise(residual, timesteps.flatten(0, 1), samples.flatten(0, 1))

        result_sum = torch.sum(torch.abs(pred_prev_sample))
        result_mean = torch.mean(torch.abs(pred_prev_sample))

        assert abs(result_sum.item() - 1153.1833) < 1e-2
        assert abs(result_mean.item() - 0.5005) < 1e-3

    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.9606) < 1e-2
        assert abs(result_mean.item() - 0.3372) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)
        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 202.0296) < 1e-2
        assert abs(result_mean.item() - 0.2631) < 1e-3

    def test_custom_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [100, 87, 50, 1, 0]

        scheduler.set_timesteps(timesteps=timesteps)
        scheduler_timesteps = scheduler.timesteps

        for i, timestep in enumerate(scheduler_timesteps):
            if i == len(scheduler_timesteps) - 1:
                expected_prev_t = -1
            else:
                expected_prev_t = timesteps[i + 1]

            prev_t = scheduler.previous_timestep(timestep)
            prev_t = prev_t.item()

            self.assertEqual(prev_t, expected_prev_t)

    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [100, 87, 50, 51, 0]

        with self.assertRaises(ValueError, msg="`custom_timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)

    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [100, 87, 50, 1, 0]
        num_inference_steps = len(timesteps)

        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `custom_timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)

    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [scheduler.config.num_train_timesteps]

        with self.assertRaises(
            ValueError,
            msg=f"`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}",
        ):
            scheduler.set_timesteps(timesteps=timesteps)
| 10 | 1 |
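The three _get_variance assertions above follow from the DDPM posterior variance in the "fixed_small" setting. A minimal sketch (not the diffusers implementation) that reproduces those reference values under the linear beta schedule from get_scheduler_config:

# var(t) = beta_t * (1 - alpha_bar_{t-1}) / (1 - alpha_bar_t)
import torch

num_train_timesteps = 1000
betas = torch.linspace(0.0001, 0.02, num_train_timesteps)  # "linear" schedule
alphas = 1.0 - betas
alphas_cumprod = torch.cumprod(alphas, dim=0)

def get_variance(t: int) -> torch.Tensor:
    alpha_prod_t = alphas_cumprod[t]
    alpha_prod_t_prev = alphas_cumprod[t - 1] if t > 0 else torch.tensor(1.0)
    return betas[t] * (1 - alpha_prod_t_prev) / (1 - alpha_prod_t)

print(get_variance(0), get_variance(487), get_variance(999))  # ~0.0, ~0.00979, ~0.02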
import tempfile
import unittest
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from transformers.testing_utils import (
is_torch_available,
require_optimum,
require_torch,
slow,
)
if is_torch_available():
import torch
@require_torch
@require_optimum
@slow
class BetterTransformerIntegrationTest(unittest.TestCase):
    def test_transform_and_reverse(self):
        model_id = "hf-internal-testing/tiny-random-t5"
        tokenizer = AutoTokenizer.from_pretrained(model_id)
        model = AutoModelForSeq2SeqLM.from_pretrained(model_id)
        inp = tokenizer("This is me", return_tensors="pt")
        model = model.to_bettertransformer()
        self.assertTrue(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules()))
        output = model.generate(**inp)
        model = model.reverse_bettertransformer()
        self.assertFalse(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules()))
        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)
            model_reloaded = AutoModelForSeq2SeqLM.from_pretrained(tmpdirname)
            self.assertFalse(
                any("BetterTransformer" in mod.__class__.__name__ for _, mod in model_reloaded.named_modules()))
            output_from_pretrained = model_reloaded.generate(**inp)
            self.assertTrue(torch.allclose(output, output_from_pretrained))

    def test_error_save_pretrained(self):
        model_id = "hf-internal-testing/tiny-random-t5"
        model = AutoModelForSeq2SeqLM.from_pretrained(model_id)
        model = model.to_bettertransformer()
        with tempfile.TemporaryDirectory() as tmpdirname:
            with self.assertRaises(ValueError):
                model.save_pretrained(tmpdirname)
            model = model.reverse_bettertransformer()
            model.save_pretrained(tmpdirname)
| 351 |
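A compact sketch of the round trip those tests exercise (requires the optimum package, as the @require_optimum marker indicates; the output path is illustrative):

from transformers import AutoModelForSeq2SeqLM

model = AutoModelForSeq2SeqLM.from_pretrained("hf-internal-testing/tiny-random-t5")
model = model.to_bettertransformer()        # swap in fused-attention modules
# ... run generation with the transformed model ...
model = model.reverse_bettertransformer()   # restore the canonical modules
model.save_pretrained("./t5-roundtrip")     # saving is only valid after reversing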
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
logger = logging.get_logger(__name__)

DatasetType = TypeVar("DatasetType", Dataset, IterableDataset)


def interleave_datasets(
    datasets: List[DatasetType],
    probabilities: Optional[List[float]] = None,
    seed: Optional[int] = None,
    info: Optional[DatasetInfo] = None,
    split: Optional[NamedSplit] = None,
    stopping_strategy: Literal["first_exhausted", "all_exhausted"] = "first_exhausted",
) -> DatasetType:
    from .arrow_dataset import Dataset
    from .iterable_dataset import IterableDataset

    if not datasets:
        raise ValueError("Unable to interleave an empty list of datasets.")
    for i, dataset in enumerate(datasets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        "is an empty dataset dictionary."
                    )
                raise ValueError(
                    f"Dataset at position {i} has at least one split: {list(dataset)}\n"
                    f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset))}']"
                )
            raise ValueError(
                f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}."
            )
        if i == 0:
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects."
            )
    if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
        raise ValueError(f"{stopping_strategy} is not supported. Please enter a valid stopping_strategy.")
    if dataset_type is Dataset:
        return _interleave_map_style_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy
        )
    else:
        return _interleave_iterable_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy
        )


def concatenate_datasets(
    dsets: List[DatasetType],
    info: Optional[DatasetInfo] = None,
    split: Optional[NamedSplit] = None,
    axis: int = 0,
) -> DatasetType:
    if not dsets:
        raise ValueError("Unable to concatenate an empty list of datasets.")
    for i, dataset in enumerate(dsets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        "is an empty dataset dictionary."
                    )
                raise ValueError(
                    f"Dataset at position {i} has at least one split: {list(dataset)}\n"
                    f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset))}']"
                )
            raise ValueError(
                f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}."
            )
        if i == 0:
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects."
            )
    if dataset_type is Dataset:
        return _concatenate_map_style_datasets(dsets, info=info, split=split, axis=axis)
    else:
        return _concatenate_iterable_datasets(dsets, info=info, split=split, axis=axis)
| 170 | 0 |
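A small usage sketch of interleave_datasets as defined above (same public API as the datasets library): sampling from two sources with probabilities.

from datasets import Dataset, interleave_datasets

d1 = Dataset.from_dict({"text": ["a", "b", "c"]})
d2 = Dataset.from_dict({"text": ["x", "y"]})

# Draw ~70% of examples from d1 and ~30% from d2; stop once one source
# is exhausted ("first_exhausted", the default shown above).
mixed = interleave_datasets([d1, d2], probabilities=[0.7, 0.3], seed=42)
print(mixed["text"])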
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
logger = logging.getLogger(__name__)


@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )


@dataclass
class DataTrainingArguments:
    train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."})
    validation_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    max_seq_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. If passed, sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to the maximum sentence length. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch. More "
                "efficient on GPU but very bad for TPU."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )

    def __post_init__(self):
        if self.train_file is not None:
            extension = self.train_file.split(".")[-1]
            assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
        if self.validation_file is not None:
            extension = self.validation_file.split(".")[-1]
            assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."


@dataclass
class DataCollatorForMultipleChoice:
    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None

    def __call__(self, features):
        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature.pop(label_name) for feature in features]
        batch_size = len(features)
        num_choices = len(features[0]["input_ids"])
        flattened_features = [
            [{k: v[i] for k, v in feature.items()} for i in range(num_choices)] for feature in features
        ]
        flattened_features = list(chain(*flattened_features))

        batch = self.tokenizer.pad(
            flattened_features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors="pt",
        )
        # Un-flatten
        batch = {k: v.view(batch_size, num_choices, -1) for k, v in batch.items()}
        # Add back labels
        batch["labels"] = torch.tensor(labels, dtype=torch.int64)
        return batch


def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_swag", model_args, data_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
    # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
    # (the dataset will be downloaded automatically from the datasets Hub).
    # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
    # 'text' is found. You can easily tweak this behavior (see below).
    # In distributed training, the load_dataset function guarantee that only one local process can concurrently
    # download the dataset.
    if data_args.train_file is not None or data_args.validation_file is not None:
        data_files = {}
        if data_args.train_file is not None:
            data_files["train"] = data_args.train_file
        if data_args.validation_file is not None:
            data_files["validation"] = data_args.validation_file
        extension = data_args.train_file.split(".")[-1]
        raw_datasets = load_dataset(
            extension,
            data_files=data_files,
            cache_dir=model_args.cache_dir,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        # Downloading and loading the swag dataset from the hub.
        raw_datasets = load_dataset(
            "swag",
            "regular",
            cache_dir=model_args.cache_dir,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
    # https://huggingface.co/docs/datasets/loading_datasets.html.

    # Load pretrained model and tokenizer

    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast_tokenizer,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # When using your own dataset or a different dataset from swag, you will probably need to change this.
    ending_names = [f"ending{i}" for i in range(4)]
    context_name = "sent1"
    question_header_name = "sent2"

    if data_args.max_seq_length is None:
        max_seq_length = tokenizer.model_max_length
        if max_seq_length > 1024:
            logger.warning(
                "The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value"
                " of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can"
                " override this default with `--block_size xxx`."
            )
            max_seq_length = 1024
    else:
        if data_args.max_seq_length > tokenizer.model_max_length:
            logger.warning(
                f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
                f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
            )
        max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)

    # Preprocessing the datasets.
    def preprocess_function(examples):
        first_sentences = [[context] * 4 for context in examples[context_name]]
        question_headers = examples[question_header_name]
        second_sentences = [
            [f"{header} {examples[end][i]}" for end in ending_names] for i, header in enumerate(question_headers)
        ]

        # Flatten out
        first_sentences = list(chain(*first_sentences))
        second_sentences = list(chain(*second_sentences))

        # Tokenize
        tokenized_examples = tokenizer(
            first_sentences,
            second_sentences,
            truncation=True,
            max_length=max_seq_length,
            padding="max_length" if data_args.pad_to_max_length else False,
        )
        # Un-flatten
        return {k: [v[i : i + 4] for i in range(0, len(v), 4)] for k, v in tokenized_examples.items()}

    if training_args.do_train:
        if "train" not in raw_datasets:
            raise ValueError("--do_train requires a train dataset")
        train_dataset = raw_datasets["train"]
        if data_args.max_train_samples is not None:
            max_train_samples = min(len(train_dataset), data_args.max_train_samples)
            train_dataset = train_dataset.select(range(max_train_samples))
        with training_args.main_process_first(desc="train dataset map pre-processing"):
            train_dataset = train_dataset.map(
                preprocess_function,
                batched=True,
                num_proc=data_args.preprocessing_num_workers,
                load_from_cache_file=not data_args.overwrite_cache,
            )

    if training_args.do_eval:
        if "validation" not in raw_datasets:
            raise ValueError("--do_eval requires a validation dataset")
        eval_dataset = raw_datasets["validation"]
        if data_args.max_eval_samples is not None:
            max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)
            eval_dataset = eval_dataset.select(range(max_eval_samples))
        with training_args.main_process_first(desc="validation dataset map pre-processing"):
            eval_dataset = eval_dataset.map(
                preprocess_function,
                batched=True,
                num_proc=data_args.preprocessing_num_workers,
                load_from_cache_file=not data_args.overwrite_cache,
            )

    # Data collator
    data_collator = (
        default_data_collator
        if data_args.pad_to_max_length
        else DataCollatorForMultipleChoice(tokenizer=tokenizer, pad_to_multiple_of=8 if training_args.fp16 else None)
    )

    # Metric
    def compute_metrics(eval_predictions):
        predictions, label_ids = eval_predictions
        preds = np.argmax(predictions, axis=1)
        return {"accuracy": (preds == label_ids).astype(np.float32).mean().item()}

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset if training_args.do_train else None,
        eval_dataset=eval_dataset if training_args.do_eval else None,
        tokenizer=tokenizer,
        data_collator=data_collator,
        compute_metrics=compute_metrics,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()  # Saves the tokenizer too for easy upload
        metrics = train_result.metrics

        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))

        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        metrics = trainer.evaluate()
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))

        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "multiple-choice",
        "dataset_tags": "swag",
        "dataset_args": "regular",
        "dataset": "SWAG",
        "language": "en",
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
| 124 |
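A standalone sketch of the flatten -> process -> regroup pattern that preprocess_function and DataCollatorForMultipleChoice above share (toy example data, not from SWAG):

from itertools import chain

examples = {
    "sent1": ["The chef"],
    "sent2": ["then"],
    "ending0": ["plates the dish."], "ending1": ["sings."],
    "ending2": ["sleeps."], "ending3": ["flies away."],
}
first = [[ctx] * 4 for ctx in examples["sent1"]]  # repeat the context per choice
second = [[f"{h} {examples[f'ending{j}'][i]}" for j in range(4)]
          for i, h in enumerate(examples["sent2"])]
flat_first, flat_second = list(chain(*first)), list(chain(*second))
pairs = list(zip(flat_first, flat_second))        # 4 rows per question
# regroup into one entry of 4 choices per question, as the script does
grouped = [pairs[i:i + 4] for i in range(0, len(pairs), 4)]
print(grouped[0])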
import warnings
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class FlavaProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "FlavaImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(
        self,
        images: Optional[ImageInput] = None,
        text: Optional[Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = False,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_image_mask: Optional[bool] = None,
        return_codebook_pixels: Optional[bool] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(
                text=text,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                return_token_type_ids=return_token_type_ids,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_length=return_length,
                verbose=verbose,
                return_tensors=return_tensors,
                **kwargs,
            )
        if images is not None:
            image_features = self.image_processor(
                images,
                return_image_mask=return_image_mask,
                return_codebook_pixels=return_codebook_pixels,
                return_tensors=return_tensors,
                **kwargs,
            )

        if text is not None and images is not None:
            encoding.update(image_features)
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
| 124 | 1 |
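A usage sketch of the processor above (assumes network access and the facebook/flava-full checkpoint; URL is a standard COCO sample):

from PIL import Image
import requests
from transformers import FlavaProcessor

processor = FlavaProcessor.from_pretrained("facebook/flava-full")
url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)
inputs = processor(images=image, text=["a photo of two cats"], return_tensors="pt", padding=True)
print(inputs.keys())  # input_ids, attention_mask, pixel_values, ...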
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_jukebox": [
"JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP",
"JukeboxConfig",
"JukeboxPriorConfig",
"JukeboxVQVAEConfig",
],
"tokenization_jukebox": ["JukeboxTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_jukebox"] = [
"JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST",
"JukeboxModel",
"JukeboxPreTrainedModel",
"JukeboxVQVAE",
"JukeboxPrior",
]
if TYPE_CHECKING:
from .configuration_jukebox import (
JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP,
JukeboxConfig,
JukeboxPriorConfig,
JukeboxVQVAEConfig,
)
from .tokenization_jukebox import JukeboxTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_jukebox import (
JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST,
JukeboxModel,
JukeboxPreTrainedModel,
JukeboxPrior,
JukeboxVQVAE,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 351 |
import os
import sys
from contextlib import contextmanager
# Windows only
if os.name == "nt":
    import ctypes
    import msvcrt  # noqa

    class CursorInfo(ctypes.Structure):
        # _fields is a specific attr expected by ctypes
        _fields_ = [("size", ctypes.c_int), ("visible", ctypes.c_byte)]


def hide_cursor():
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = False
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25l")
        sys.stdout.flush()


def show_cursor():
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = True
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25h")
        sys.stdout.flush()


@contextmanager
def hide():
    """Context manager that hides the terminal cursor and restores it on exit."""
    try:
        hide_cursor()
        yield
    finally:
        show_cursor()
| 159 | 0 |
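A short usage sketch of the hide() context manager defined above: the cursor stays hidden for the duration of the block and is restored even if the loop raises.

import time

with hide():
    for step in range(3):
        sys.stdout.write(f"\rworking... {step + 1}/3")
        sys.stdout.flush()
        time.sleep(0.2)
print()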
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = "▁"
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/nllb-200-distilled-600M": (
"https://huggingface.co/facebook/nllb-200-distilled-600M/blob/main/sentencepiece.bpe.model"
),
}
}
A: Dict = {
"facebook/nllb-200-distilled-600M": 1_0_2_4,
}
# fmt: off
A: List[Any] = ["ace_Arab", "ace_Latn", "acm_Arab", "acq_Arab", "aeb_Arab", "afr_Latn", "ajp_Arab", "aka_Latn", "amh_Ethi", "apc_Arab", "arb_Arab", "ars_Arab", "ary_Arab", "arz_Arab", "asm_Beng", "ast_Latn", "awa_Deva", "ayr_Latn", "azb_Arab", "azj_Latn", "bak_Cyrl", "bam_Latn", "ban_Latn", "bel_Cyrl", "bem_Latn", "ben_Beng", "bho_Deva", "bjn_Arab", "bjn_Latn", "bod_Tibt", "bos_Latn", "bug_Latn", "bul_Cyrl", "cat_Latn", "ceb_Latn", "ces_Latn", "cjk_Latn", "ckb_Arab", "crh_Latn", "cym_Latn", "dan_Latn", "deu_Latn", "dik_Latn", "dyu_Latn", "dzo_Tibt", "ell_Grek", "eng_Latn", "epo_Latn", "est_Latn", "eus_Latn", "ewe_Latn", "fao_Latn", "pes_Arab", "fij_Latn", "fin_Latn", "fon_Latn", "fra_Latn", "fur_Latn", "fuv_Latn", "gla_Latn", "gle_Latn", "glg_Latn", "grn_Latn", "guj_Gujr", "hat_Latn", "hau_Latn", "heb_Hebr", "hin_Deva", "hne_Deva", "hrv_Latn", "hun_Latn", "hye_Armn", "ibo_Latn", "ilo_Latn", "ind_Latn", "isl_Latn", "ita_Latn", "jav_Latn", "jpn_Jpan", "kab_Latn", "kac_Latn", "kam_Latn", "kan_Knda", "kas_Arab", "kas_Deva", "kat_Geor", "knc_Arab", "knc_Latn", "kaz_Cyrl", "kbp_Latn", "kea_Latn", "khm_Khmr", "kik_Latn", "kin_Latn", "kir_Cyrl", "kmb_Latn", "kon_Latn", "kor_Hang", "kmr_Latn", "lao_Laoo", "lvs_Latn", "lij_Latn", "lim_Latn", "lin_Latn", "lit_Latn", "lmo_Latn", "ltg_Latn", "ltz_Latn", "lua_Latn", "lug_Latn", "luo_Latn", "lus_Latn", "mag_Deva", "mai_Deva", "mal_Mlym", "mar_Deva", "min_Latn", "mkd_Cyrl", "plt_Latn", "mlt_Latn", "mni_Beng", "khk_Cyrl", "mos_Latn", "mri_Latn", "zsm_Latn", "mya_Mymr", "nld_Latn", "nno_Latn", "nob_Latn", "npi_Deva", "nso_Latn", "nus_Latn", "nya_Latn", "oci_Latn", "gaz_Latn", "ory_Orya", "pag_Latn", "pan_Guru", "pap_Latn", "pol_Latn", "por_Latn", "prs_Arab", "pbt_Arab", "quy_Latn", "ron_Latn", "run_Latn", "rus_Cyrl", "sag_Latn", "san_Deva", "sat_Beng", "scn_Latn", "shn_Mymr", "sin_Sinh", "slk_Latn", "slv_Latn", "smo_Latn", "sna_Latn", "snd_Arab", "som_Latn", "sot_Latn", "spa_Latn", "als_Latn", "srd_Latn", "srp_Cyrl", "ssw_Latn", "sun_Latn", "swe_Latn", "swh_Latn", "szl_Latn", "tam_Taml", "tat_Cyrl", "tel_Telu", "tgk_Cyrl", "tgl_Latn", "tha_Thai", "tir_Ethi", "taq_Latn", "taq_Tfng", "tpi_Latn", "tsn_Latn", "tso_Latn", "tuk_Latn", "tum_Latn", "tur_Latn", "twi_Latn", "tzm_Tfng", "uig_Arab", "ukr_Cyrl", "umb_Latn", "urd_Arab", "uzn_Latn", "vec_Latn", "vie_Latn", "war_Latn", "wol_Latn", "xho_Latn", "ydd_Hebr", "yor_Latn", "yue_Hant", "zho_Hans", "zho_Hant", "zul_Latn"]
FAIRSEQ_LANGUAGE_CODES = A  # restore the conventional name for the language-code list above (lost in obfuscation)


class NllbTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        tokenizer_file=None,
        src_lang=None,
        tgt_lang=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        additional_special_tokens=None,
        legacy_behaviour=False,
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        self.legacy_behaviour = legacy_behaviour

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            tokenizer_file=tokenizer_file,
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            legacy_behaviour=legacy_behaviour,
            **kwargs,
        )

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | ---- | ---- | ---- | ---- | ---- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a'
# spm | '<unk>' | '<s>' | '</s>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a' | '▁s'
        # Mimic fairseq token-to-id alignment for the first 4 tokens
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}

        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1
        self.sp_model_size = len(self.sp_model)
        self.lang_code_to_id = {
            code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(FAIRSEQ_LANGUAGE_CODES)
        }
        self.id_to_lang_code = {v: k for k, v in self.lang_code_to_id.items()}
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset

        self.fairseq_tokens_to_ids.update(self.lang_code_to_id)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
        self._additional_special_tokens = list(self.lang_code_to_id.keys())

        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            self._additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in self._additional_special_tokens]
            )

        self._src_lang = src_lang if src_lang is not None else "eng_Latn"
        self.cur_lang_code_id = self.lang_code_to_id[self._src_lang]
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)
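    # Hedged note (not in the original file): SentencePieceProcessor instances are not
    # picklable, so __getstate__ above drops the processor and keeps only its serialized
    # proto, which __setstate__ uses to rebuild an equivalent processor.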
    @property
    def vocab_size(self):
        return len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset + 1  # Plus 1 for the mask token

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)
    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1] * len(self.suffix_tokens)
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones
    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def _build_translation_inputs(
        self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs
    ):
        """Used by the translation pipeline to prepare inputs for generate()."""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) into an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        """Converts an index (integer) into a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of sub-word tokens into a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = "eng_Latn" , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = "fra_Latn" , **_SCREAMING_SNAKE_CASE , ) -> BatchEncoding:
'''simple docstring'''
UpperCAmelCase : List[str] = src_lang
UpperCAmelCase : Tuple = tgt_lang
return super().prepare_seqaseq_batch(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self ) -> Dict:
'''simple docstring'''
return self.set_src_lang_special_tokens(self.src_lang )
def SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
'''simple docstring'''
return self.set_tgt_lang_special_tokens(self.tgt_lang )
    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Reset the special tokens to the source lang setting.
        - In legacy mode: no prefix and suffix=[eos, src_lang_code].
        - In default mode: prefix=[src_lang_code], suffix=[eos].
        """
        self.cur_lang_code = self.lang_code_to_id[src_lang]
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]

    def set_tgt_lang_special_tokens(self, lang: str) -> None:
        """Reset the special tokens to the target lang setting.
        - In legacy mode: no prefix and suffix=[eos, tgt_lang_code].
        - In default mode: prefix=[tgt_lang_code], suffix=[eos].
        """
        self.cur_lang_code = self.lang_code_to_id[lang]
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]
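# Hedged usage sketch (not part of the original file; checkpoint id and sentence
# are illustrative):
#
#   tokenizer = NllbTokenizer.from_pretrained(
#       "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="fra_Latn"
#   )
#   inputs = tokenizer("UN Chief says there is no military solution in Syria", return_tensors="pt")
#   # The source language code is attached as a prefix/suffix special token,
#   # per set_src_lang_special_tokens above.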
| 109 |
"""simple docstring"""
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def log_results(result: Dataset, args: Dict[str, str]):
    """DO NOT CHANGE. This function computes and logs the result metrics."""
    log_outputs = args.log_outputs
    dataset_id = "_".join(args.dataset.split("/") + [args.config, args.split])

    # load metric
    wer = load_metric("wer")
    cer = load_metric("cer")

    # compute metrics
    wer_result = wer.compute(references=result["target"], predictions=result["prediction"])
    cer_result = cer.compute(references=result["target"], predictions=result["prediction"])

    # print & log results
    result_str = f"WER: {wer_result}\nCER: {cer_result}"
    print(result_str)

    with open(f"{dataset_id}_eval_results.txt", "w") as f:
        f.write(result_str)

    # log all results in text file. Possibly interesting for analysis
    if log_outputs is not None:
        pred_file = f"log_{dataset_id}_predictions.txt"
        target_file = f"log_{dataset_id}_targets.txt"

        with open(pred_file, "w") as p, open(target_file, "w") as t:
            # mapping function to write output
            def write_to_file(batch, i):
                p.write(f"{i}" + "\n")
                p.write(batch["prediction"] + "\n")
                t.write(f"{i}" + "\n")
                t.write(batch["target"] + "\n")

            result.map(write_to_file, with_indices=True)
def normalize_text(text: str) -> str:
    chars_to_ignore_regex = '[,?.!\-\;\:"“%‘”�—’…–]'  # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
    text = re.sub(chars_to_ignore_regex, "", text.lower())

    # In addition, we can normalize the target text, e.g. removing new lines characters etc...
    # note that order is important here!
    token_sequences_to_ignore = ["\n\n", "\n", "   ", "  "]

    for t in token_sequences_to_ignore:
        text = " ".join(text.split(t))

    return text
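# Hedged example (not in the original script) of the normalization above:
# normalize_text("Hello, World!") lower-cases the text, strips the punctuation
# listed in chars_to_ignore_regex, and collapses newline/space runs, yielding
# "hello world".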
def main(args):
    # load dataset
    dataset = load_dataset(args.dataset, args.config, split=args.split, use_auth_token=True)

    # for testing: only process the first two examples as a test
    # dataset = dataset.select(range(10))

    # load processor
    feature_extractor = AutoFeatureExtractor.from_pretrained(args.model_id)
    sampling_rate = feature_extractor.sampling_rate

    # resample audio
    dataset = dataset.cast_column("audio", Audio(sampling_rate=sampling_rate))

    # load eval pipeline
    if args.device is None:
        args.device = 0 if torch.cuda.is_available() else -1
    asr = pipeline("automatic-speech-recognition", model=args.model_id, device=args.device)

    # map function to decode audio
    def map_to_pred(batch):
        prediction = asr(
            batch["audio"]["array"], chunk_length_s=args.chunk_length_s, stride_length_s=args.stride_length_s
        )

        batch["prediction"] = prediction["text"]
        batch["target"] = normalize_text(batch["sentence"])
        return batch

    # run inference on all examples
    result = dataset.map(map_to_pred, remove_columns=dataset.column_names)

    # compute and log_results
    # do not change function below
    log_results(result, args)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--model_id", type=str, required=True, help="Model identifier. Should be loadable with 🤗 Transformers"
)
parser.add_argument(
"--dataset",
type=str,
required=True,
help="Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets",
)
parser.add_argument(
"--config", type=str, required=True, help="Config of the dataset. *E.g.* `'en'` for Common Voice"
)
parser.add_argument("--split", type=str, required=True, help="Split of the dataset. *E.g.* `'test'`")
parser.add_argument(
"--chunk_length_s", type=float, default=None, help="Chunk length in seconds. Defaults to 5 seconds."
)
parser.add_argument(
"--stride_length_s", type=float, default=None, help="Stride of the audio chunks. Defaults to 1 second."
)
parser.add_argument(
"--log_outputs", action="store_true", help="If defined, write outputs to log file for analysis."
)
parser.add_argument(
"--device",
type=int,
default=None,
help="The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.",
)
    args = parser.parse_args()
main(args)
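# Hedged example invocation (not part of the original script; model and dataset ids
# are illustrative):
#
#   python eval.py --model_id hf-test/xls-r-dummy \
#       --dataset mozilla-foundation/common_voice_8_0 --config en --split test --log_outputs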
| 109 | 1 |
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
__a :List[str] = "\\n@misc{wu2016googles,\n title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n"
__a :str = "\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe 'GLEU score'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore's range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n"
__a :int = "\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n 'google_bleu': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 
'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.4\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _a ( datasets.Metric ):
"""simple docstring"""
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string", id="token"), id="sequence"),
                    "references": datasets.Sequence(
                        datasets.Sequence(datasets.Value("string", id="token"), id="sequence"), id="references"
                    ),
                }
            ),
        )
    def _compute(self, predictions, references, min_len: int = 1, max_len: int = 4):
        return {
            "google_bleu": gleu_score.corpus_gleu(
                list_of_references=references, hypotheses=predictions, min_len=min_len, max_len=max_len
            )
        }
| 359 |
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from torch import nn
from transformers import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_inverse_sqrt_schedule,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
def unwrap_schedule(scheduler, num_steps=10):
    lrs = []
    for _ in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
    return lrs
def unwrap_and_save_reload_schedule(scheduler, num_steps=10):
    lrs = []
    for step in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
        if step == num_steps // 2:
            with tempfile.TemporaryDirectory() as tmpdirname:
                file_name = os.path.join(tmpdirname, "schedule.bin")
                torch.save(scheduler.state_dict(), file_name)

                state_dict = torch.load(file_name)
                scheduler.load_state_dict(state_dict)
    return lrs
@require_torch
class OptimizationTest(unittest.TestCase):
    def assertListAlmostEqual(self, list1, list2, tol):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol)

    def test_adam_w(self):
        w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True)
        target = torch.tensor([0.4, 0.2, -0.5])
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = AdamW(params=[w], lr=2e-1, weight_decay=0.0)
        for _ in range(100):
            loss = criterion(w, target)
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1e-2)

    def test_adafactor(self):
        w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True)
        target = torch.tensor([0.4, 0.2, -0.5])
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = Adafactor(
            params=[w],
            lr=1e-2,
            eps=(1e-30, 1e-3),
            clip_threshold=1.0,
            decay_rate=-0.8,
            beta1=None,
            weight_decay=0.0,
            relative_step=False,
            scale_parameter=False,
            warmup_init=False,
        )
        for _ in range(1000):
            loss = criterion(w, target)
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1e-2)
@require_torch
class ScheduleInitTest(unittest.TestCase):
    m = nn.Linear(50, 50) if is_torch_available() else None
    optimizer = AdamW(m.parameters(), lr=10.0) if is_torch_available() else None
    num_steps = 10

    def assertListAlmostEqual(self, list1, list2, tol, msg=None):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol, msg=msg)
    def test_schedulers(self):
        common_kwargs = {"num_warmup_steps": 2, "num_training_steps": 10}
        # schedulers dict format
        # function: (sched_args_dict, expected_learning_rates)
        scheds = {
            get_constant_schedule: ({}, [10.0] * self.num_steps),
            get_constant_schedule_with_warmup: (
                {"num_warmup_steps": 4},
                [0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
            ),
            get_linear_schedule_with_warmup: (
                {**common_kwargs},
                [0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
            ),
            get_cosine_schedule_with_warmup: (
                {**common_kwargs},
                [0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
            ),
            get_cosine_with_hard_restarts_schedule_with_warmup: (
                {**common_kwargs, "num_cycles": 2},
                [0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
            ),
            get_polynomial_decay_schedule_with_warmup: (
                {**common_kwargs, "power": 2.0, "lr_end": 1e-7},
                [0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156],
            ),
            get_inverse_sqrt_schedule: (
                {"num_warmup_steps": 2},
                [0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714],
            ),
        }

        for scheduler_func, data in scheds.items():
            kwargs, expected_learning_rates = data

            scheduler = scheduler_func(self.optimizer, **kwargs)
            self.assertEqual(len([scheduler.get_lr()[0]]), 1)
            lrs_1 = unwrap_schedule(scheduler, self.num_steps)
            self.assertListAlmostEqual(
                lrs_1,
                expected_learning_rates,
                tol=1e-2,
                msg=f"failed for {scheduler_func} in normal scheduler",
            )

            scheduler = scheduler_func(self.optimizer, **kwargs)
            if scheduler_func.__name__ != "get_constant_schedule":
                LambdaScheduleWrapper.wrap_scheduler(scheduler)  # wrap to test picklability of the schedule
            lrs_2 = unwrap_and_save_reload_schedule(scheduler, self.num_steps)
            self.assertListEqual(lrs_1, lrs_2, msg=f"failed for {scheduler_func} in save and reload")
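# Hedged note (not part of the original test file): PyTorch's LambdaLR.state_dict()
# only serializes lr_lambdas that are callable objects (via their __dict__) and skips
# plain functions or lambdas. Wrapping each lr_lambda in the callable class below
# appears intended to exercise that save/reload path in the loop above.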
class LambdaScheduleWrapper:
    """wrap lr_lambda in pickleable objects"""

    def __init__(self, fn):
        self.fn = fn

    def __call__(self, *args, **kwargs):
        return self.fn(*args, **kwargs)

    @classmethod
    def wrap_scheduler(cls, scheduler):
        scheduler.lr_lambdas = list(map(cls, scheduler.lr_lambdas))
| 329 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
    AutoencoderKL,
    DiffusionPipeline,
    EulerDiscreteScheduler,
    StableDiffusionXLImg2ImgPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionXLImg2ImgPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionXLImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            attention_head_dim=(2, 4),
            use_linear_projection=True,
            addition_embed_type="text_time",
            addition_time_embed_dim=8,
            transformer_layers_per_block=(1, 2),
            projection_class_embeddings_input_dim=80,
            cross_attention_dim=64,
        )
        scheduler = EulerDiscreteScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            steps_offset=1,
            beta_schedule="scaled_linear",
            timestep_spacing="leading",
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=32,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        # local_files_only's original value was lost in obfuscation; False (the default) is assumed
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip", local_files_only=False)
        text_encoder_2 = CLIPTextModelWithProjection(text_encoder_config)
        tokenizer_2 = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip", local_files_only=False)

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "text_encoder_2": text_encoder_2,
            "tokenizer_2": tokenizer_2,
            # "safety_checker": None,
            # "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image / 2 + 0.5
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 5.0,
            "output_type": "numpy",
            "strength": 0.75,
        }
        return inputs
    def test_stable_diffusion_xl_img2img_euler(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionXLImg2ImgPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)

        expected_slice = np.array([0.4656, 0.4840, 0.4439, 0.6698, 0.5574, 0.4524, 0.5799, 0.5943, 0.5165])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_attention_slicing_forward_pass(self):
        super().test_attention_slicing_forward_pass(expected_max_diff=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)

    def test_save_load_optional_components(self):
        # Overridden to skip; the method name is inferred, the original identifier was lost in obfuscation.
        pass
    def test_stable_diffusion_xl_img2img_negative_prompt_embeds(self):
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionXLImg2ImgPipeline(**components)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        # forward without prompt embeds
        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["this is a negative prompt"]
        inputs["negative_prompt"] = negative_prompt
        inputs["prompt"] = 3 * [inputs["prompt"]]

        output = sd_pipe(**inputs)
        image_slice_1 = output.images[0, -3:, -3:, -1]

        # forward with prompt embeds
        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["this is a negative prompt"]
        prompt = 3 * [inputs.pop("prompt")]

        (
            prompt_embeds,
            negative_prompt_embeds,
            pooled_prompt_embeds,
            negative_pooled_prompt_embeds,
        ) = sd_pipe.encode_prompt(prompt, negative_prompt=negative_prompt)

        output = sd_pipe(
            **inputs,
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            pooled_prompt_embeds=pooled_prompt_embeds,
            negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
        )
        image_slice_2 = output.images[0, -3:, -3:, -1]

        # make sure that it's equal
        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4
@slow
@require_torch_gpu
class StableDiffusion2PipelineSlowTests(unittest.TestCase):  # class/test names inferred; originals lost in obfuscation
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "a photograph of an astronaut riding a horse",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_default_ddim(self):
        pipe = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-base")
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.49493, 0.47896, 0.40798, 0.54214, 0.53212, 0.48202, 0.47656, 0.46329, 0.48506])
        assert np.abs(image_slice - expected_slice).max() < 7e-3
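# Hedged note (not part of the original test file): the fast tests above build tiny
# randomly initialized components and run on CPU, while the @slow/@require_torch_gpu
# class only executes when slow tests are enabled (RUN_SLOW=1) on a CUDA machine.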
| 175 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
A_ : Optional[int] = {"""configuration_ibert""": ["""IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """IBertConfig""", """IBertOnnxConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_ibert"] = [
"""IBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""IBertForMaskedLM""",
"""IBertForMultipleChoice""",
"""IBertForQuestionAnswering""",
"""IBertForSequenceClassification""",
"""IBertForTokenClassification""",
"""IBertModel""",
"""IBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig, IBertOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ibert import (
IBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
IBertForMaskedLM,
IBertForMultipleChoice,
IBertForQuestionAnswering,
IBertForSequenceClassification,
IBertForTokenClassification,
IBertModel,
IBertPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 215 | 0 |
"""simple docstring"""
import argparse
import os
import re
import numpy as np
import PIL
import torch
from timm import create_model
from torch.optim.lr_scheduler import OneCycleLR
from torch.utils.data import DataLoader, Dataset
from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor
from accelerate import Accelerator
def extract_label(fname):
    stem = fname.split(os.path.sep)[-1]
    return re.search(r"^(.*)_\d+\.jpg$", stem).groups()[0]
class PetsDataset(Dataset):
    def __init__(self, file_names, image_transform=None, label_to_id=None):
        self.file_names = file_names
        self.image_transform = image_transform
        self.label_to_id = label_to_id

    def __len__(self):
        return len(self.file_names)

    def __getitem__(self, idx):
        fname = self.file_names[idx]
        raw_image = PIL.Image.open(fname)
        image = raw_image.convert("RGB")
        if self.image_transform is not None:
            image = self.image_transform(image)
        label = extract_label(fname)
        if self.label_to_id is not None:
            label = self.label_to_id[label]
        return {"image": image, "label": label}
def training_function(config, args):
    # Initialize accelerator
    if args.with_tracking:
        accelerator = Accelerator(
            cpu=args.cpu, mixed_precision=args.mixed_precision, log_with="all", project_dir=args.project_dir
        )
    else:
        accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)

    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    image_size = config["image_size"]
    if not isinstance(image_size, (list, tuple)):
        image_size = (image_size, image_size)
# Parse out whether we are saving every epoch or after a certain number of batches
if hasattr(args.checkpointing_steps ,"isdigit" ):
if args.checkpointing_steps == "epoch":
__lowerCAmelCase : Optional[Any] = args.checkpointing_steps
elif args.checkpointing_steps.isdigit():
__lowerCAmelCase : Dict = int(args.checkpointing_steps )
else:
raise ValueError(
F"""Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed.""" )
else:
__lowerCAmelCase : int = None
# We need to initialize the trackers we use, and also store our configuration
if args.with_tracking:
__lowerCAmelCase : Dict = os.path.split(__snake_case )[-1].split("." )[0]
accelerator.init_trackers(__snake_case ,__snake_case )
# Grab all the image filenames
__lowerCAmelCase : Union[str, Any] = [os.path.join(args.data_dir ,__snake_case ) for fname in os.listdir(args.data_dir ) if fname.endswith(".jpg" )]
# Build the label correspondences
__lowerCAmelCase : Union[str, Any] = [extract_label(__snake_case ) for fname in file_names]
__lowerCAmelCase : Any = list(set(__snake_case ) )
id_to_label.sort()
__lowerCAmelCase : Optional[Any] = {lbl: i for i, lbl in enumerate(__snake_case )}
# Set the seed before splitting the data.
np.random.seed(__snake_case )
torch.manual_seed(__snake_case )
torch.cuda.manual_seed_all(__snake_case )
# Split our filenames between train and validation
__lowerCAmelCase : List[str] = np.random.permutation(len(__snake_case ) )
__lowerCAmelCase : Dict = int(0.8 * len(__snake_case ) )
__lowerCAmelCase : str = random_perm[:cut]
__lowerCAmelCase : Optional[int] = random_perm[cut:]
# For training we use a simple RandomResizedCrop
__lowerCAmelCase : str = Compose([RandomResizedCrop(__snake_case ,scale=(0.5, 1.0) ), ToTensor()] )
__lowerCAmelCase : List[str] = PetsDataset(
[file_names[i] for i in train_split] ,image_transform=__snake_case ,label_to_id=__snake_case )
# For evaluation, we use a deterministic Resize
__lowerCAmelCase : Union[str, Any] = Compose([Resize(__snake_case ), ToTensor()] )
__lowerCAmelCase : List[str] = PetsDataset([file_names[i] for i in eval_split] ,image_transform=__snake_case ,label_to_id=__snake_case )
# Instantiate dataloaders.
__lowerCAmelCase : Union[str, Any] = DataLoader(__snake_case ,shuffle=__snake_case ,batch_size=__snake_case ,num_workers=4 )
__lowerCAmelCase : Any = DataLoader(__snake_case ,shuffle=__snake_case ,batch_size=__snake_case ,num_workers=4 )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
__lowerCAmelCase : int = create_model("resnet50d" ,pretrained=__snake_case ,num_classes=len(__snake_case ) )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
__lowerCAmelCase : List[str] = model.to(accelerator.device )
# Freezing the base model
for param in model.parameters():
__lowerCAmelCase : Any = False
for param in model.get_classifier().parameters():
__lowerCAmelCase : List[Any] = True
# We normalize the batches of images to be a bit faster.
__lowerCAmelCase : Optional[Any] = torch.tensor(model.default_cfg["mean"] )[None, :, None, None].to(accelerator.device )
__lowerCAmelCase : int = torch.tensor(model.default_cfg["std"] )[None, :, None, None].to(accelerator.device )
# Instantiate optimizer
__lowerCAmelCase : int = torch.optim.Adam(params=model.parameters() ,lr=lr / 25 )
# Instantiate learning rate scheduler
__lowerCAmelCase : List[Any] = OneCycleLR(optimizer=__snake_case ,max_lr=__snake_case ,epochs=__snake_case ,steps_per_epoch=len(__snake_case ) )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : Tuple = accelerator.prepare(
__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case )
# We need to keep track of how many total steps we have iterated over
__lowerCAmelCase : Dict = 0
# We also need to keep track of the starting epoch so files are named properly
__lowerCAmelCase : List[str] = 0
# Potentially load in the weights and states from a previous save
if args.resume_from_checkpoint:
if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "":
accelerator.print(F"""Resumed from checkpoint: {args.resume_from_checkpoint}""" )
accelerator.load_state(args.resume_from_checkpoint )
__lowerCAmelCase : Optional[Any] = os.path.basename(args.resume_from_checkpoint )
else:
# Get the most recent checkpoint
__lowerCAmelCase : Optional[int] = [f.name for f in os.scandir(os.getcwd() ) if f.is_dir()]
dirs.sort(key=os.path.getctime )
__lowerCAmelCase : Optional[Any] = dirs[-1] # Sorts folders by date modified, most recent checkpoint is the last
# Extract `epoch_{i}` or `step_{i}`
__lowerCAmelCase : str = os.path.splitext(__snake_case )[0]
if "epoch" in training_difference:
__lowerCAmelCase : Dict = int(training_difference.replace("epoch_" ,"" ) ) + 1
__lowerCAmelCase : Optional[Any] = None
else:
__lowerCAmelCase : Any = int(training_difference.replace("step_" ,"" ) )
__lowerCAmelCase : Optional[int] = resume_step // len(__snake_case )
resume_step -= starting_epoch * len(__snake_case )
# Now we train the model
for epoch in range(__snake_case ,__snake_case ):
model.train()
if args.with_tracking:
__lowerCAmelCase : Any = 0
if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None:
# We need to skip steps until we reach the resumed step
__lowerCAmelCase : Optional[int] = accelerator.skip_first_batches(__snake_case ,__snake_case )
overall_step += resume_step
else:
# After the first iteration though, we need to go back to the original dataloader
__lowerCAmelCase : Optional[int] = train_dataloader
for batch in active_dataloader:
# We could avoid this line since we set the accelerator with `device_placement=True`.
__lowerCAmelCase : List[Any] = {k: v.to(accelerator.device ) for k, v in batch.items()}
__lowerCAmelCase : Union[str, Any] = (batch["image"] - mean) / std
__lowerCAmelCase : Optional[int] = model(__snake_case )
__lowerCAmelCase : List[str] = torch.nn.functional.cross_entropy(__snake_case ,batch["label"] )
# We keep track of the loss at each epoch
if args.with_tracking:
total_loss += loss.detach().float()
accelerator.backward(__snake_case )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
if isinstance(__snake_case ,__snake_case ):
__lowerCAmelCase : List[Any] = F"""step_{overall_step}"""
if overall_step % checkpointing_steps == 0:
if args.output_dir is not None:
__lowerCAmelCase : Tuple = os.path.join(args.output_dir ,__snake_case )
accelerator.save_state(__snake_case )
model.eval()
__lowerCAmelCase : int = 0
__lowerCAmelCase : Optional[int] = 0
for step, batch in enumerate(__snake_case ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
__lowerCAmelCase : Tuple = {k: v.to(accelerator.device ) for k, v in batch.items()}
__lowerCAmelCase : Optional[Any] = (batch["image"] - mean) / std
with torch.no_grad():
__lowerCAmelCase : Optional[Any] = model(__snake_case )
__lowerCAmelCase : List[str] = outputs.argmax(dim=-1 )
__lowerCAmelCase , __lowerCAmelCase : Optional[int] = accelerator.gather_for_metrics((predictions, batch["label"]) )
__lowerCAmelCase : str = predictions == references
num_elems += accurate_preds.shape[0]
accurate += accurate_preds.long().sum()
__lowerCAmelCase : Optional[Any] = accurate.item() / num_elems
# Use accelerator.print to print only on the main process.
accelerator.print(F"""epoch {epoch}: {100 * eval_metric:.2f}""" )
if args.with_tracking:
accelerator.log(
{
"accuracy": 100 * eval_metric,
"train_loss": total_loss.item() / len(__snake_case ),
"epoch": epoch,
} ,step=__snake_case ,)
if checkpointing_steps == "epoch":
__lowerCAmelCase : Tuple = F"""epoch_{epoch}"""
if args.output_dir is not None:
__lowerCAmelCase : Optional[Any] = os.path.join(args.output_dir ,__snake_case )
accelerator.save_state(__snake_case )
if args.with_tracking:
accelerator.end_training()
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument("--data_dir", required=True, help="The data folder on disk.")
    parser.add_argument("--fp16", action="store_true", help="If passed, will use FP16 training.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    parser.add_argument(
        "--checkpointing_steps",
        type=str,
        default=None,
        help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.",
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--resume_from_checkpoint",
        type=str,
        default=None,
        help="If the training should continue from a checkpoint folder.",
    )
    parser.add_argument(
        "--with_tracking",
        action="store_true",
        help="Whether to load in all available experiment trackers from the environment and use them for logging.",
    )
    parser.add_argument(
        "--project_dir",
        type=str,
        default="logs",
        help="Location on where to store experiment tracking logs and relevant project information",
    )
    args = parser.parse_args()
    config = {"lr": 3e-2, "num_epochs": 3, "seed": 42, "batch_size": 64, "image_size": 224}
    training_function(config, args)


if __name__ == "__main__":
    main()
| 58 |
"""simple docstring"""
import argparse
import os
import re
import numpy as np
import PIL
import torch
from timm import create_model
from torch.optim.lr_scheduler import OneCycleLR
from torch.utils.data import DataLoader, Dataset
from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor
from accelerate import Accelerator
def extract_label(fname):
    stem = fname.split(os.path.sep)[-1]
    return re.search(r"^(.*)_\d+\.jpg$", stem).groups()[0]
class PetsDataset(Dataset):
    def __init__(self, file_names, image_transform=None, label_to_id=None):
        self.file_names = file_names
        self.image_transform = image_transform
        self.label_to_id = label_to_id

    def __len__(self):
        return len(self.file_names)

    def __getitem__(self, idx):
        fname = self.file_names[idx]
        raw_image = PIL.Image.open(fname)
        image = raw_image.convert("RGB")
        if self.image_transform is not None:
            image = self.image_transform(image)
        label = extract_label(fname)
        if self.label_to_id is not None:
            label = self.label_to_id[label]
        return {"image": image, "label": label}
def training_function(config, args):
    # Initialize accelerator
    if args.with_tracking:
        accelerator = Accelerator(
            cpu=args.cpu, mixed_precision=args.mixed_precision, log_with="all", project_dir=args.project_dir
        )
    else:
        accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)

    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    image_size = config["image_size"]
    if not isinstance(image_size, (list, tuple)):
        image_size = (image_size, image_size)
# Parse out whether we are saving every epoch or after a certain number of batches
if hasattr(args.checkpointing_steps ,"isdigit" ):
if args.checkpointing_steps == "epoch":
__lowerCAmelCase : Optional[Any] = args.checkpointing_steps
elif args.checkpointing_steps.isdigit():
__lowerCAmelCase : Dict = int(args.checkpointing_steps )
else:
raise ValueError(
F"""Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed.""" )
else:
__lowerCAmelCase : int = None
# We need to initialize the trackers we use, and also store our configuration
if args.with_tracking:
__lowerCAmelCase : Dict = os.path.split(__snake_case )[-1].split("." )[0]
accelerator.init_trackers(__snake_case ,__snake_case )
# Grab all the image filenames
__lowerCAmelCase : Union[str, Any] = [os.path.join(args.data_dir ,__snake_case ) for fname in os.listdir(args.data_dir ) if fname.endswith(".jpg" )]
# Build the label correspondences
__lowerCAmelCase : Union[str, Any] = [extract_label(__snake_case ) for fname in file_names]
__lowerCAmelCase : Any = list(set(__snake_case ) )
id_to_label.sort()
__lowerCAmelCase : Optional[Any] = {lbl: i for i, lbl in enumerate(__snake_case )}
# Set the seed before splitting the data.
np.random.seed(__snake_case )
torch.manual_seed(__snake_case )
torch.cuda.manual_seed_all(__snake_case )
# Split our filenames between train and validation
__lowerCAmelCase : List[str] = np.random.permutation(len(__snake_case ) )
__lowerCAmelCase : Dict = int(0.8 * len(__snake_case ) )
__lowerCAmelCase : str = random_perm[:cut]
__lowerCAmelCase : Optional[int] = random_perm[cut:]
# For training we use a simple RandomResizedCrop
__lowerCAmelCase : str = Compose([RandomResizedCrop(__snake_case ,scale=(0.5, 1.0) ), ToTensor()] )
__lowerCAmelCase : List[str] = PetsDataset(
[file_names[i] for i in train_split] ,image_transform=__snake_case ,label_to_id=__snake_case )
# For evaluation, we use a deterministic Resize
__lowerCAmelCase : Union[str, Any] = Compose([Resize(__snake_case ), ToTensor()] )
__lowerCAmelCase : List[str] = PetsDataset([file_names[i] for i in eval_split] ,image_transform=__snake_case ,label_to_id=__snake_case )
# Instantiate dataloaders.
__lowerCAmelCase : Union[str, Any] = DataLoader(__snake_case ,shuffle=__snake_case ,batch_size=__snake_case ,num_workers=4 )
__lowerCAmelCase : Any = DataLoader(__snake_case ,shuffle=__snake_case ,batch_size=__snake_case ,num_workers=4 )
# Instantiate the model (we build the model here so that the seed also controls new weight initialization)
__lowerCAmelCase : int = create_model("resnet50d" ,pretrained=__snake_case ,num_classes=len(__snake_case ) )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
__lowerCAmelCase : List[str] = model.to(accelerator.device )
# Freezing the base model
for param in model.parameters():
__lowerCAmelCase : Any = False
for param in model.get_classifier().parameters():
__lowerCAmelCase : List[Any] = True
# We normalize the batches of images to be a bit faster.
__lowerCAmelCase : Optional[Any] = torch.tensor(model.default_cfg["mean"] )[None, :, None, None].to(accelerator.device )
__lowerCAmelCase : int = torch.tensor(model.default_cfg["std"] )[None, :, None, None].to(accelerator.device )
# Instantiate optimizer
__lowerCAmelCase : int = torch.optim.Adam(params=model.parameters() ,lr=lr / 25 )
# Instantiate learning rate scheduler
__lowerCAmelCase : List[Any] = OneCycleLR(optimizer=__snake_case ,max_lr=__snake_case ,epochs=__snake_case ,steps_per_epoch=len(__snake_case ) )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : Tuple = accelerator.prepare(
__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case )
# We need to keep track of how many total steps we have iterated over
__lowerCAmelCase : Dict = 0
# We also need to keep track of the starting epoch so files are named properly
__lowerCAmelCase : List[str] = 0
# Potentially load in the weights and states from a previous save
if args.resume_from_checkpoint:
if args.resume_from_checkpoint is not None and args.resume_from_checkpoint != "":
accelerator.print(F"""Resumed from checkpoint: {args.resume_from_checkpoint}""" )
accelerator.load_state(args.resume_from_checkpoint )
__lowerCAmelCase : Optional[Any] = os.path.basename(args.resume_from_checkpoint )
else:
# Get the most recent checkpoint
__lowerCAmelCase : Optional[int] = [f.name for f in os.scandir(os.getcwd() ) if f.is_dir()]
dirs.sort(key=os.path.getctime )
__lowerCAmelCase : Optional[Any] = dirs[-1] # Sorts folders by date modified, most recent checkpoint is the last
# Extract `epoch_{i}` or `step_{i}`
__lowerCAmelCase : str = os.path.splitext(__snake_case )[0]
if "epoch" in training_difference:
__lowerCAmelCase : Dict = int(training_difference.replace("epoch_" ,"" ) ) + 1
__lowerCAmelCase : Optional[Any] = None
else:
__lowerCAmelCase : Any = int(training_difference.replace("step_" ,"" ) )
__lowerCAmelCase : Optional[int] = resume_step // len(__snake_case )
resume_step -= starting_epoch * len(__snake_case )
# Now we train the model
for epoch in range(__snake_case ,__snake_case ):
model.train()
if args.with_tracking:
__lowerCAmelCase : Any = 0
if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None:
# We need to skip steps until we reach the resumed step
__lowerCAmelCase : Optional[int] = accelerator.skip_first_batches(__snake_case ,__snake_case )
overall_step += resume_step
else:
# After the first iteration though, we need to go back to the original dataloader
__lowerCAmelCase : Optional[int] = train_dataloader
for batch in active_dataloader:
# We could avoid this line since we set the accelerator with `device_placement=True`.
__lowerCAmelCase : List[Any] = {k: v.to(accelerator.device ) for k, v in batch.items()}
__lowerCAmelCase : Union[str, Any] = (batch["image"] - mean) / std
__lowerCAmelCase : Optional[int] = model(__snake_case )
__lowerCAmelCase : List[str] = torch.nn.functional.cross_entropy(__snake_case ,batch["label"] )
# We keep track of the loss at each epoch
if args.with_tracking:
total_loss += loss.detach().float()
accelerator.backward(__snake_case )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
if isinstance(__snake_case ,__snake_case ):
__lowerCAmelCase : List[Any] = F"""step_{overall_step}"""
if overall_step % checkpointing_steps == 0:
if args.output_dir is not None:
__lowerCAmelCase : Tuple = os.path.join(args.output_dir ,__snake_case )
accelerator.save_state(__snake_case )
model.eval()
__lowerCAmelCase : int = 0
__lowerCAmelCase : Optional[int] = 0
for step, batch in enumerate(__snake_case ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
__lowerCAmelCase : Tuple = {k: v.to(accelerator.device ) for k, v in batch.items()}
__lowerCAmelCase : Optional[Any] = (batch["image"] - mean) / std
with torch.no_grad():
__lowerCAmelCase : Optional[Any] = model(__snake_case )
__lowerCAmelCase : List[str] = outputs.argmax(dim=-1 )
__lowerCAmelCase , __lowerCAmelCase : Optional[int] = accelerator.gather_for_metrics((predictions, batch["label"]) )
__lowerCAmelCase : str = predictions == references
num_elems += accurate_preds.shape[0]
accurate += accurate_preds.long().sum()
__lowerCAmelCase : Optional[Any] = accurate.item() / num_elems
# Use accelerator.print to print only on the main process.
accelerator.print(F"""epoch {epoch}: {100 * eval_metric:.2f}""" )
if args.with_tracking:
accelerator.log(
{
"accuracy": 100 * eval_metric,
"train_loss": total_loss.item() / len(__snake_case ),
"epoch": epoch,
} ,step=__snake_case ,)
if checkpointing_steps == "epoch":
__lowerCAmelCase : Tuple = F"""epoch_{epoch}"""
if args.output_dir is not None:
__lowerCAmelCase : Optional[Any] = os.path.join(args.output_dir ,__snake_case )
accelerator.save_state(__snake_case )
if args.with_tracking:
accelerator.end_training()
def _lowercase ( ) -> Tuple:
__lowerCAmelCase : Union[str, Any] = argparse.ArgumentParser(description="Simple example of training script." )
parser.add_argument("--data_dir" ,required=__snake_case ,help="The data folder on disk." )
parser.add_argument("--fp16" ,action="store_true" ,help="If passed, will use FP16 training." )
parser.add_argument(
"--mixed_precision" ,type=__snake_case ,default=__snake_case ,choices=["no", "fp16", "bf16", "fp8"] ,help="Whether to use mixed precision. Choose"
"between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
"and an Nvidia Ampere GPU." ,)
parser.add_argument("--cpu" ,action="store_true" ,help="If passed, will train on the CPU." )
parser.add_argument(
"--checkpointing_steps" ,type=__snake_case ,default=__snake_case ,help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch." ,)
parser.add_argument(
"--output_dir" ,type=__snake_case ,default="." ,help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory." ,)
parser.add_argument(
"--resume_from_checkpoint" ,type=__snake_case ,default=__snake_case ,help="If the training should continue from a checkpoint folder." ,)
parser.add_argument(
"--with_tracking" ,action="store_true" ,help="Whether to load in all available experiment trackers from the environment and use them for logging." ,)
parser.add_argument(
"--project_dir" ,type=__snake_case ,default="logs" ,help="Location on where to store experiment tracking logs` and relevent project information" ,)
__lowerCAmelCase : List[Any] = parser.parse_args()
__lowerCAmelCase : List[Any] = {"lr": 3e-2, "num_epochs": 3, "seed": 42, "batch_size": 64, "image_size": 224}
training_function(__snake_case ,__snake_case )
if __name__ == "__main__":
main() | 58 | 1 |
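# --- Hedged sketch (added for illustration; not part of the dataset row above) ---
# The training script's `extract_label` strips the directory and keeps everything
# before the trailing "_<number>.jpg" as the class label. Minimal self-contained
# restatement; the sample filenames are hypothetical.
import os
import re


def extract_label(fname: str) -> str:
    stem = fname.split(os.path.sep)[-1]
    return re.search(r"^(.*)_\d+\.jpg$", stem).groups()[0]


assert extract_label(os.path.join("data", "Abyssinian_1.jpg")) == "Abyssinian"
assert extract_label(os.path.join("data", "great_pyrenees_112.jpg")) == "great_pyrenees"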
"""simple docstring"""
import unittest
import numpy as np
from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
)
class __snake_case ( unittest.TestCase ):
def __init__( self , lowercase , lowercase=13 , lowercase=7 , lowercase=True , lowercase=True , lowercase=True , lowercase=True , lowercase=99 , lowercase=32 , lowercase=5 , lowercase=4 , lowercase=37 , lowercase="gelu" , lowercase=0.1 , lowercase=0.1 , lowercase=5_12 , lowercase=16 , lowercase=2 , lowercase=0.02 , lowercase=4 , ) -> Tuple:
'''simple docstring'''
a__: Any = parent
a__: int = batch_size
a__: str = seq_length
a__: Union[str, Any] = is_training
a__: str = use_attention_mask
a__: Tuple = use_token_type_ids
a__: List[Any] = use_labels
a__: str = vocab_size
a__: int = hidden_size
a__: Tuple = num_hidden_layers
a__: Any = num_attention_heads
a__: Optional[int] = intermediate_size
a__: List[str] = hidden_act
a__: List[str] = hidden_dropout_prob
a__: Optional[Any] = attention_probs_dropout_prob
a__: List[Any] = max_position_embeddings
a__: Dict = type_vocab_size
a__: Optional[Any] = type_sequence_label_size
a__: List[str] = initializer_range
a__: Tuple = num_choices
def lowerCamelCase_ ( self) -> str:
'''simple docstring'''
a__: int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
a__: Union[str, Any] = None
if self.use_attention_mask:
a__: Optional[Any] = random_attention_mask([self.batch_size, self.seq_length])
a__: Dict = None
if self.use_token_type_ids:
a__: Tuple = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
a__: Dict = RobertaPreLayerNormConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowercase , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def lowerCamelCase_ ( self) -> Union[str, Any]:
'''simple docstring'''
a__: Optional[Any] = self.prepare_config_and_inputs()
a__ , a__ , a__ , a__: Tuple = config_and_inputs
a__: Any = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask}
return config, inputs_dict
def lowerCamelCase_ ( self) -> Dict:
'''simple docstring'''
a__: Optional[int] = self.prepare_config_and_inputs()
a__ , a__ , a__ , a__: Any = config_and_inputs
a__: Optional[int] = True
a__: List[Any] = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
a__: Dict = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2)
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
# Copied from tests.models.roberta.test_modeling_flax_roberta.FlaxRobertaModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40
class __snake_case ( __lowerCAmelCase , unittest.TestCase ):
a__ = True
a__ = (
(
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
)
if is_flax_available()
else ()
)
def lowerCamelCase_ ( self) -> str:
'''simple docstring'''
a__: Optional[Any] = FlaxRobertaPreLayerNormModelTester(self)
@slow
def lowerCamelCase_ ( self) -> List[str]:
'''simple docstring'''
for model_class_name in self.all_model_classes:
a__: List[str] = model_class_name.from_pretrained('andreasmadsen/efficient_mlm_m0.40' , from_pt=lowercase)
a__: List[str] = model(np.ones((1, 1)))
self.assertIsNotNone(lowercase)
@require_flax
class __snake_case ( unittest.TestCase ):
@slow
def lowerCamelCase_ ( self) -> Optional[Any]:
'''simple docstring'''
a__: List[str] = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained('andreasmadsen/efficient_mlm_m0.40' , from_pt=lowercase)
a__: Union[str, Any] = np.array([[0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2]] , dtype=jnp.intaa)
a__: Optional[int] = model(lowercase)[0]
a__: Dict = [1, 11, 5_02_65]
self.assertEqual(list(output.shape) , lowercase)
# compare the actual values for a slice.
a__: Optional[int] = np.array(
[[[40.4880, 18.0199, -5.2367], [-1.8877, -4.0885, 10.7085], [-2.2613, -5.6110, 7.2665]]] , dtype=np.floataa)
self.assertTrue(np.allclose(output[:, :3, :3] , lowercase , atol=1e-4))
@slow
def lowerCamelCase_ ( self) -> List[str]:
'''simple docstring'''
a__: Dict = FlaxRobertaPreLayerNormModel.from_pretrained('andreasmadsen/efficient_mlm_m0.40' , from_pt=lowercase)
a__: Dict = np.array([[0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2]] , dtype=jnp.intaa)
a__: Union[str, Any] = model(lowercase)[0]
# compare the actual values for a slice.
a__: str = np.array(
[[[0.0208, -0.0356, 0.0237], [-0.1569, -0.0411, -0.2626], [0.1879, 0.0125, -0.0089]]] , dtype=np.floataa)
self.assertTrue(np.allclose(output[:, :3, :3] , lowercase , atol=1e-4))
| 290 | """simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
'MIT/ast-finetuned-audioset-10-10-0.4593': (
'https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json'
),
}
class __snake_case ( __lowerCAmelCase ):
a__ = """audio-spectrogram-transformer"""
def __init__( self , lowercase=7_68 , lowercase=12 , lowercase=12 , lowercase=30_72 , lowercase="gelu" , lowercase=0.0 , lowercase=0.0 , lowercase=0.02 , lowercase=1e-12 , lowercase=16 , lowercase=True , lowercase=10 , lowercase=10 , lowercase=10_24 , lowercase=1_28 , **lowercase , ) -> str:
'''simple docstring'''
super().__init__(**lowercase)
a__: Any = hidden_size
a__: int = num_hidden_layers
a__: Union[str, Any] = num_attention_heads
a__: Any = intermediate_size
a__: Union[str, Any] = hidden_act
a__: int = hidden_dropout_prob
a__: str = attention_probs_dropout_prob
a__: str = initializer_range
a__: Tuple = layer_norm_eps
a__: Any = patch_size
a__: int = qkv_bias
a__: Optional[Any] = frequency_stride
a__: int = time_stride
a__: List[str] = max_length
a__: Tuple = num_mel_bins
| 290 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
"EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json",
# See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}
class UpperCAmelCase_ ( a):
lowerCamelCase__ = 'gpt_neox'
def __init__( self, __a=5_0432, __a=6144, __a=44, __a=64, __a=2_4576, __a="gelu", __a=0.25, __a=1_0000, __a=0.0, __a=0.0, __a=0.1, __a=2048, __a=0.02, __a=1E-5, __a=True, __a=0, __a=2, __a=False, __a=True, __a=None, **__a, ):
'''simple docstring'''
super().__init__(bos_token_id=__a, eos_token_id=__a, **__a)
_lowerCAmelCase : Tuple = vocab_size
_lowerCAmelCase : int = max_position_embeddings
_lowerCAmelCase : Dict = hidden_size
_lowerCAmelCase : Optional[int] = num_hidden_layers
_lowerCAmelCase : List[str] = num_attention_heads
_lowerCAmelCase : Optional[int] = intermediate_size
_lowerCAmelCase : List[Any] = hidden_act
_lowerCAmelCase : List[Any] = rotary_pct
_lowerCAmelCase : List[Any] = rotary_emb_base
_lowerCAmelCase : Tuple = attention_dropout
_lowerCAmelCase : Any = hidden_dropout
_lowerCAmelCase : int = classifier_dropout
_lowerCAmelCase : List[Any] = initializer_range
_lowerCAmelCase : Dict = layer_norm_eps
_lowerCAmelCase : int = use_cache
_lowerCAmelCase : Optional[int] = tie_word_embeddings
_lowerCAmelCase : Union[str, Any] = use_parallel_residual
_lowerCAmelCase : Union[str, Any] = rope_scaling
self._rope_scaling_validation()
if self.hidden_size % self.num_attention_heads != 0:
raise ValueError(
"The hidden size is not divisble by the number of attention heads! Make sure to update them!")
def snake_case__ ( self):
'''simple docstring'''
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling, __a) or len(self.rope_scaling) != 2:
raise ValueError(
"`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
f"got {self.rope_scaling}")
_lowerCAmelCase : List[str] = self.rope_scaling.get("type", __a)
_lowerCAmelCase : Union[str, Any] = self.rope_scaling.get("factor", __a)
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
f"`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}")
if rope_scaling_factor is None or not isinstance(__a, __a) or rope_scaling_factor <= 1.0:
raise ValueError(f"`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}")
| 300 |
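# --- Hedged sketch (added) ---
# Standalone restatement of the `rope_scaling` validation in the GPT-NeoX config
# row above, so the accepted format is explicit: either None or a two-field dict
# {"type": "linear" | "dynamic", "factor": float > 1}. The function name is ours.
def validate_rope_scaling(rope_scaling):
    if rope_scaling is None:
        return  # scaling disabled
    if not isinstance(rope_scaling, dict) or len(rope_scaling) != 2:
        raise ValueError(f"`rope_scaling` must be a dict with two fields, got {rope_scaling}")
    scaling_type = rope_scaling.get("type")
    factor = rope_scaling.get("factor")
    if scaling_type not in ("linear", "dynamic"):
        raise ValueError(f"type must be 'linear' or 'dynamic', got {scaling_type}")
    if not isinstance(factor, float) or factor <= 1.0:
        raise ValueError(f"factor must be a float > 1, got {factor}")


validate_rope_scaling(None)                                # ok: scaling disabled
validate_rope_scaling({"type": "dynamic", "factor": 2.0})  # ok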
import unittest
import numpy as np
from diffusers import OnnxStableDiffusionInpaintPipelineLegacy
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
load_numpy,
nightly,
require_onnxruntime,
require_torch_gpu,
)
if is_onnx_available():
import onnxruntime as ort
@nightly
@require_onnxruntime
@require_torch_gpu
class UpperCAmelCase_ ( unittest.TestCase):
@property
def snake_case__ ( self):
'''simple docstring'''
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = ort.SessionOptions()
_lowerCAmelCase : int = False
return options
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Dict = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo.png")
_lowerCAmelCase : List[str] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo_mask.png")
_lowerCAmelCase : List[str] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy")
# using the PNDM scheduler by default
_lowerCAmelCase : Optional[int] = OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained(
"CompVis/stable-diffusion-v1-4", revision="onnx", safety_checker=__a, feature_extractor=__a, provider=self.gpu_provider, sess_options=self.gpu_options, )
pipe.set_progress_bar_config(disable=__a)
_lowerCAmelCase : Any = "A red cat sitting on a park bench"
_lowerCAmelCase : Optional[Any] = np.random.RandomState(0)
_lowerCAmelCase : Any = pipe(
prompt=__a, image=__a, mask_image=__a, strength=0.75, guidance_scale=7.5, num_inference_steps=15, generator=__a, output_type="np", )
_lowerCAmelCase : Optional[int] = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image).max() < 1E-2
| 300 | 1 |
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
'''simple docstring'''
__UpperCamelCase : Any = IFInpaintingSuperResolutionPipeline
__UpperCamelCase : Optional[Any] = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"""width""", """height"""}
__UpperCamelCase : List[str] = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({'''original_image'''} )
__UpperCamelCase : List[str] = PipelineTesterMixin.required_optional_params - {"""latents"""}
def __magic_name__ ( self : Dict ):
"""simple docstring"""
return self._get_superresolution_dummy_components()
def __magic_name__ ( self : Dict , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Tuple=0 ):
"""simple docstring"""
if str(__UpperCAmelCase ).startswith('''mps''' ):
_A: Tuple = torch.manual_seed(__UpperCAmelCase )
else:
_A: Optional[int] = torch.Generator(device=__UpperCAmelCase ).manual_seed(__UpperCAmelCase )
_A: Optional[Any] = floats_tensor((1, 3, 1_6, 1_6) , rng=random.Random(__UpperCAmelCase ) ).to(__UpperCAmelCase )
_A: Optional[int] = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(__UpperCAmelCase ) ).to(__UpperCAmelCase )
_A: Tuple = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(__UpperCAmelCase ) ).to(__UpperCAmelCase )
_A: Tuple = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'original_image': original_image,
'mask_image': mask_image,
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def __magic_name__ ( self : Dict ):
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
def __magic_name__ ( self : List[str] ):
"""simple docstring"""
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != '''cuda''' , reason='''float16 requires CUDA''' )
def __magic_name__ ( self : str ):
"""simple docstring"""
super().test_save_load_floataa(expected_max_diff=1e-1 )
def __magic_name__ ( self : List[Any] ):
"""simple docstring"""
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )
def __magic_name__ ( self : Tuple ):
"""simple docstring"""
self._test_save_load_local()
def __magic_name__ ( self : str ):
"""simple docstring"""
self._test_inference_batch_single_identical(
expected_max_diff=1e-2 , )
| 121 |
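# --- Hedged sketch (added) ---
# The dummy-input helper above seeds differently per device: `mps` lacked
# device-bound `torch.Generator` support in older PyTorch releases, so it falls
# back to the global RNG. Minimal restatement (the helper name is ours):
import torch


def make_generator(device, seed=0):
    if str(device).startswith("mps"):
        return torch.manual_seed(seed)  # seeds and returns the default CPU generator
    return torch.Generator(device=device).manual_seed(seed)


print(torch.rand(2, generator=make_generator("cpu")))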
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=7 , __UpperCAmelCase=3 , __UpperCAmelCase=1_8 , __UpperCAmelCase=3_0 , __UpperCAmelCase=4_0_0 , __UpperCAmelCase=True , __UpperCAmelCase=None , __UpperCAmelCase=True , ):
'''simple docstring'''
lowerCAmelCase__ :Dict = size if size is not None else {'height': 1_8, 'width': 1_8}
lowerCAmelCase__ :Tuple = parent
lowerCAmelCase__ :List[Any] = batch_size
lowerCAmelCase__ :List[Any] = num_channels
lowerCAmelCase__ :Any = image_size
lowerCAmelCase__ :int = min_resolution
lowerCAmelCase__ :int = max_resolution
lowerCAmelCase__ :Dict = do_resize
lowerCAmelCase__ :str = size
lowerCAmelCase__ :Any = apply_ocr
def snake_case ( self ):
'''simple docstring'''
return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class _lowerCAmelCase ( a , unittest.TestCase ):
"""simple docstring"""
__magic_name__ :str = LayoutLMvaImageProcessor if is_pytesseract_available() else None
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :List[Any] = LayoutLMvaImageProcessingTester(self )
@property
def snake_case ( self ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Optional[int] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__UpperCAmelCase , 'do_resize' ) )
self.assertTrue(hasattr(__UpperCAmelCase , 'size' ) )
self.assertTrue(hasattr(__UpperCAmelCase , 'apply_ocr' ) )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Union[str, Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'height': 1_8, 'width': 1_8} )
lowerCAmelCase__ :List[str] = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 )
self.assertEqual(image_processor.size , {'height': 4_2, 'width': 4_2} )
def snake_case ( self ):
'''simple docstring'''
pass
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCAmelCase__ :Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCAmelCase , Image.Image )
# Test not batched input
lowerCAmelCase__ :Tuple = image_processing(image_inputs[0] , return_tensors='pt' )
self.assertEqual(
encoding.pixel_values.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
self.assertIsInstance(encoding.words , __UpperCAmelCase )
self.assertIsInstance(encoding.boxes , __UpperCAmelCase )
# Test batched
lowerCAmelCase__ :Any = image_processing(__UpperCAmelCase , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowerCAmelCase__ :Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCAmelCase , numpify=__UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCAmelCase , np.ndarray )
# Test not batched input
lowerCAmelCase__ :Tuple = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
lowerCAmelCase__ :Optional[Any] = image_processing(__UpperCAmelCase , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Tuple = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowerCAmelCase__ :List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCAmelCase , torchify=__UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCAmelCase , torch.Tensor )
# Test not batched input
lowerCAmelCase__ :Tuple = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
lowerCAmelCase__ :Any = image_processing(__UpperCAmelCase , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :List[str] = LayoutLMvaImageProcessor()
from datasets import load_dataset
lowerCAmelCase__ :Tuple = load_dataset('hf-internal-testing/fixtures_docvqa' , split='test' )
lowerCAmelCase__ :int = Image.open(ds[0]['file'] ).convert('RGB' )
lowerCAmelCase__ :Optional[int] = image_processing(__UpperCAmelCase , return_tensors='pt' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_2_4, 2_2_4) )
self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
lowerCAmelCase__ :Optional[Any] = [['11:14', 'to', '11:39', 'a.m', '11:39', 'to', '11:44', 'a.m.', '11:44', 'a.m.', 'to', '12:25', 'p.m.', '12:25', 'to', '12:58', 'p.m.', '12:58', 'to', '4:00', 'p.m.', '2:00', 'to', '5:00', 'p.m.', 'Coffee', 'Break', 'Coffee', 'will', 'be', 'served', 'for', 'men', 'and', 'women', 'in', 'the', 'lobby', 'adjacent', 'to', 'exhibit', 'area.', 'Please', 'move', 'into', 'exhibit', 'area.', '(Exhibits', 'Open)', 'TRRF', 'GENERAL', 'SESSION', '(PART', '|)', 'Presiding:', 'Lee', 'A.', 'Waller', 'TRRF', 'Vice', 'President', '“Introductory', 'Remarks”', 'Lee', 'A.', 'Waller,', 'TRRF', 'Vice', 'Presi-', 'dent', 'Individual', 'Interviews', 'with', 'TRRF', 'Public', 'Board', 'Members', 'and', 'Sci-', 'entific', 'Advisory', 'Council', 'Mem-', 'bers', 'Conducted', 'by', 'TRRF', 'Treasurer', 'Philip', 'G.', 'Kuehn', 'to', 'get', 'answers', 'which', 'the', 'public', 'refrigerated', 'warehousing', 'industry', 'is', 'looking', 'for.', 'Plus', 'questions', 'from', 'the', 'floor.', 'Dr.', 'Emil', 'M.', 'Mrak,', 'University', 'of', 'Cal-', 'ifornia,', 'Chairman,', 'TRRF', 'Board;', 'Sam', 'R.', 'Cecil,', 'University', 'of', 'Georgia', 'College', 'of', 'Agriculture;', 'Dr.', 'Stanley', 'Charm,', 'Tufts', 'University', 'School', 'of', 'Medicine;', 'Dr.', 'Robert', 'H.', 'Cotton,', 'ITT', 'Continental', 'Baking', 'Company;', 'Dr.', 'Owen', 'Fennema,', 'University', 'of', 'Wis-', 'consin;', 'Dr.', 'Robert', 'E.', 'Hardenburg,', 'USDA.', 'Questions', 'and', 'Answers', 'Exhibits', 'Open', 'Capt.', 'Jack', 'Stoney', 'Room', 'TRRF', 'Scientific', 'Advisory', 'Council', 'Meeting', 'Ballroom', 'Foyer']] # noqa: E231
lowerCAmelCase__ :List[str] = [[[1_4_1, 5_7, 2_1_4, 6_9], [2_2_8, 5_8, 2_5_2, 6_9], [1_4_1, 7_5, 2_1_6, 8_8], [2_3_0, 7_9, 2_8_0, 8_8], [1_4_2, 2_6_0, 2_1_8, 2_7_3], [2_3_0, 2_6_1, 2_5_5, 2_7_3], [1_4_3, 2_7_9, 2_1_8, 2_9_0], [2_3_1, 2_8_2, 2_9_0, 2_9_1], [1_4_3, 3_4_2, 2_1_8, 3_5_4], [2_3_1, 3_4_5, 2_8_9, 3_5_5], [2_0_2, 3_6_2, 2_2_7, 3_7_3], [1_4_3, 3_7_9, 2_2_0, 3_9_2], [2_3_1, 3_8_2, 2_9_1, 3_9_4], [1_4_4, 7_1_4, 2_2_0, 7_2_6], [2_3_1, 7_1_5, 2_5_6, 7_2_6], [1_4_4, 7_3_2, 2_2_0, 7_4_5], [2_3_2, 7_3_6, 2_9_1, 7_4_7], [1_4_4, 7_6_9, 2_1_8, 7_8_2], [2_3_1, 7_7_0, 2_5_6, 7_8_2], [1_4_1, 7_8_8, 2_0_2, 8_0_1], [2_1_5, 7_9_1, 2_7_4, 8_0_4], [1_4_3, 8_2_6, 2_0_4, 8_3_8], [2_1_5, 8_2_6, 2_4_0, 8_3_8], [1_4_2, 8_4_4, 2_0_2, 8_5_7], [2_1_5, 8_4_7, 2_7_4, 8_5_9], [3_3_4, 5_7, 4_2_7, 6_9], [4_4_0, 5_7, 5_2_2, 6_9], [3_6_9, 7_5, 4_6_1, 8_8], [4_6_9, 7_5, 5_1_6, 8_8], [5_2_8, 7_6, 5_6_2, 8_8], [5_7_0, 7_6, 6_6_7, 8_8], [6_7_5, 7_5, 7_1_1, 8_7], [7_2_1, 7_9, 7_7_8, 8_8], [7_8_9, 7_5, 8_4_0, 8_8], [3_6_9, 9_7, 4_7_0, 1_0_7], [4_8_4, 9_4, 5_0_7, 1_0_6], [5_1_8, 9_4, 5_6_2, 1_0_7], [5_7_6, 9_4, 6_5_5, 1_1_0], [6_6_8, 9_4, 7_9_2, 1_0_9], [8_0_4, 9_5, 8_2_9, 1_0_7], [3_6_9, 1_1_3, 4_6_5, 1_2_5], [4_7_7, 1_1_6, 5_4_7, 1_2_5], [5_6_2, 1_1_3, 6_5_8, 1_2_5], [6_7_1, 1_1_6, 7_4_8, 1_2_5], [7_6_1, 1_1_3, 8_1_1, 1_2_5], [3_6_9, 1_3_1, 4_6_5, 1_4_3], [4_7_7, 1_3_3, 5_4_8, 1_4_3], [5_6_3, 1_3_0, 6_9_8, 1_4_5], [7_1_0, 1_3_0, 8_0_2, 1_4_6], [3_3_6, 1_7_1, 4_1_2, 1_8_3], [4_2_3, 1_7_1, 5_7_2, 1_8_3], [5_8_2, 1_7_0, 7_1_6, 1_8_4], [7_2_8, 1_7_1, 8_1_7, 1_8_7], [8_2_9, 1_7_1, 8_4_4, 1_8_6], [3_3_8, 1_9_7, 4_8_2, 2_1_2], [5_0_7, 1_9_6, 5_5_7, 2_0_9], [5_6_9, 1_9_6, 5_9_5, 2_0_8], [6_1_0, 1_9_6, 7_0_2, 2_0_9], [5_0_5, 2_1_4, 5_8_3, 2_2_6], [5_9_5, 2_1_4, 6_5_6, 2_2_7], [6_7_0, 2_1_5, 8_0_7, 2_2_7], [3_3_5, 2_5_9, 5_4_3, 2_7_4], [5_5_6, 2_5_9, 7_0_8, 2_7_2], [3_7_2, 2_7_9, 4_2_2, 2_9_1], [4_3_5, 2_7_9, 4_6_0, 2_9_1], [4_7_4, 2_7_9, 5_7_4, 2_9_2], [5_8_7, 2_7_8, 6_6_4, 2_9_1], [6_7_6, 2_7_8, 7_3_8, 2_9_1], [7_5_1, 2_7_9, 8_3_4, 2_9_1], [3_7_2, 2_9_8, 4_3_4, 3_1_0], [3_3_5, 3_4_1, 4_8_3, 3_5_4], [4_9_7, 3_4_1, 6_5_5, 3_5_4], [6_6_7, 3_4_1, 7_2_8, 3_5_4], [7_4_0, 3_4_1, 8_2_5, 3_5_4], [3_3_5, 3_6_0, 4_3_0, 3_7_2], [4_4_2, 3_6_0, 5_3_4, 3_7_2], [5_4_5, 3_5_9, 6_8_7, 3_7_2], [6_9_7, 3_6_0, 7_5_4, 3_7_2], [7_6_5, 3_6_0, 8_2_3, 3_7_3], [3_3_4, 3_7_8, 4_2_8, 3_9_1], [4_4_0, 3_7_8, 5_7_7, 3_9_4], [5_9_0, 3_7_8, 7_0_5, 3_9_1], [7_2_0, 3_7_8, 8_0_1, 3_9_1], [3_3_4, 3_9_7, 4_0_0, 4_0_9], [3_7_0, 4_1_6, 5_2_9, 4_2_9], [5_4_4, 4_1_6, 5_7_6, 4_3_2], [5_8_7, 4_1_6, 6_6_5, 4_2_8], [6_7_7, 4_1_6, 8_1_4, 4_2_9], [3_7_2, 4_3_5, 4_5_2, 4_5_0], [4_6_5, 4_3_4, 4_9_5, 4_4_7], [5_1_1, 4_3_4, 6_0_0, 4_4_7], [6_1_1, 4_3_6, 6_3_7, 4_4_7], [6_4_9, 4_3_6, 6_9_4, 4_5_1], [7_0_5, 4_3_8, 8_2_4, 4_4_7], [3_6_9, 4_5_3, 4_5_2, 4_6_6], [4_6_4, 4_5_4, 5_0_9, 4_6_6], [5_2_2, 4_5_3, 6_1_1, 4_6_9], [6_2_5, 4_5_3, 7_9_2, 4_6_9], [3_7_0, 4_7_2, 5_5_6, 4_8_8], [5_7_0, 4_7_2, 6_8_4, 4_8_7], [6_9_7, 4_7_2, 7_1_8, 4_8_5], [7_3_2, 4_7_2, 8_3_5, 4_8_8], [3_6_9, 4_9_0, 4_1_1, 5_0_3], [4_2_5, 4_9_0, 4_8_4, 5_0_3], [4_9_6, 4_9_0, 6_3_5, 5_0_6], [6_4_5, 4_9_0, 7_0_7, 5_0_3], [7_1_8, 4_9_1, 7_6_1, 5_0_3], [7_7_1, 4_9_0, 8_4_0, 5_0_3], [3_3_6, 5_1_0, 3_7_4, 5_2_1], [3_8_8, 5_1_0, 4_4_7, 5_2_2], [4_6_0, 5_1_0, 4_8_9, 5_2_1], [5_0_3, 5_1_0, 5_8_0, 5_2_2], [5_9_2, 5_0_9, 7_3_6, 5_2_5], [7_4_5, 5_0_9, 7_7_0, 5_2_2], [7_8_1, 5_0_9, 8_4_0, 5_2_2], [3_3_8, 5_2_8, 4_3_4, 5_4_1], [4_4_8, 5_2_8, 5_9_6, 5_4_1], [6_0_9, 5_2_7, 6_8_7, 5_4_0], [7_0_0, 5_2_8, 7_9_2, 5_4_1], [3_3_6, 5_4_6, 
3_9_7, 5_5_9], [4_0_7, 5_4_6, 4_3_1, 5_5_9], [4_4_3, 5_4_6, 5_2_5, 5_6_0], [5_3_7, 5_4_6, 6_8_0, 5_6_2], [6_8_8, 5_4_6, 7_1_4, 5_5_9], [7_2_2, 5_4_6, 8_3_7, 5_6_2], [3_3_6, 5_6_5, 4_4_9, 5_8_1], [4_6_1, 5_6_5, 4_8_5, 5_7_7], [4_9_7, 5_6_5, 6_6_5, 5_8_1], [6_8_1, 5_6_5, 7_1_8, 5_7_7], [7_3_2, 5_6_5, 8_3_7, 5_8_0], [3_3_7, 5_8_4, 4_3_8, 5_9_7], [4_5_2, 5_8_3, 5_2_1, 5_9_6], [5_3_5, 5_8_4, 6_7_7, 5_9_9], [6_9_0, 5_8_3, 7_8_7, 5_9_6], [8_0_1, 5_8_3, 8_2_5, 5_9_6], [3_3_8, 6_0_2, 4_7_8, 6_1_5], [4_9_2, 6_0_2, 5_3_0, 6_1_4], [5_4_3, 6_0_2, 6_3_8, 6_1_5], [6_5_0, 6_0_2, 6_7_6, 6_1_4], [6_8_8, 6_0_2, 7_8_8, 6_1_5], [8_0_2, 6_0_2, 8_4_3, 6_1_4], [3_3_7, 6_2_1, 5_0_2, 6_3_3], [5_1_6, 6_2_1, 6_1_5, 6_3_7], [6_2_9, 6_2_1, 7_7_4, 6_3_6], [7_8_9, 6_2_1, 8_2_7, 6_3_3], [3_3_7, 6_3_9, 4_1_8, 6_5_2], [4_3_2, 6_4_0, 5_7_1, 6_5_3], [5_8_7, 6_3_9, 7_3_1, 6_5_5], [7_4_3, 6_3_9, 7_6_9, 6_5_2], [7_8_0, 6_3_9, 8_4_1, 6_5_2], [3_3_8, 6_5_8, 4_4_0, 6_7_3], [4_5_5, 6_5_8, 4_9_1, 6_7_0], [5_0_8, 6_5_8, 6_0_2, 6_7_1], [6_1_6, 6_5_8, 6_3_8, 6_7_0], [6_5_4, 6_5_8, 8_3_5, 6_7_4], [3_3_7, 6_7_7, 4_2_9, 6_8_9], [3_3_7, 7_1_4, 4_8_2, 7_2_6], [4_9_5, 7_1_4, 5_4_8, 7_2_6], [5_6_1, 7_1_4, 6_8_3, 7_2_6], [3_3_8, 7_7_0, 4_6_1, 7_8_2], [4_7_4, 7_6_9, 5_5_4, 7_8_5], [4_8_9, 7_8_8, 5_6_2, 8_0_3], [5_7_6, 7_8_8, 6_4_3, 8_0_1], [6_5_6, 7_8_7, 7_5_1, 8_0_4], [7_6_4, 7_8_8, 8_4_4, 8_0_1], [3_3_4, 8_2_5, 4_2_1, 8_3_8], [4_3_0, 8_2_4, 5_7_4, 8_3_8], [5_8_4, 8_2_4, 7_2_3, 8_4_1], [3_3_5, 8_4_4, 4_5_0, 8_5_7], [4_6_4, 8_4_3, 5_8_3, 8_6_0], [6_2_8, 8_6_2, 7_5_5, 8_7_5], [7_6_9, 8_6_1, 8_4_8, 8_7_8]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , __UpperCAmelCase )
self.assertListEqual(encoding.boxes , __UpperCAmelCase )
# with apply_OCR = False
lowerCAmelCase__ :int = LayoutLMvaImageProcessor(apply_ocr=__UpperCAmelCase )
lowerCAmelCase__ :Optional[int] = image_processing(__UpperCAmelCase , return_tensors='pt' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_2_4, 2_2_4) )
| 293 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__lowerCAmelCase : str =logging.get_logger(__name__)
__lowerCAmelCase : List[Any] ={
"""YituTech/conv-bert-base""": """https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json""",
"""YituTech/conv-bert-medium-small""": (
"""https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json"""
),
"""YituTech/conv-bert-small""": """https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json""",
# See all ConvBERT models at https://huggingface.co/models?filter=convbert
}
class _A ( lowerCAmelCase ):
snake_case__ : int = 'convbert'
def __init__( self , __lowerCAmelCase=3_0522 , __lowerCAmelCase=768 , __lowerCAmelCase=12 , __lowerCAmelCase=12 , __lowerCAmelCase=3072 , __lowerCAmelCase="gelu" , __lowerCAmelCase=0.1 , __lowerCAmelCase=0.1 , __lowerCAmelCase=512 , __lowerCAmelCase=2 , __lowerCAmelCase=0.0_2 , __lowerCAmelCase=1E-12 , __lowerCAmelCase=1 , __lowerCAmelCase=0 , __lowerCAmelCase=2 , __lowerCAmelCase=768 , __lowerCAmelCase=2 , __lowerCAmelCase=9 , __lowerCAmelCase=1 , __lowerCAmelCase=None , **__lowerCAmelCase , ):
"""simple docstring"""
super().__init__(
pad_token_id=__lowerCAmelCase , bos_token_id=__lowerCAmelCase , eos_token_id=__lowerCAmelCase , **__lowerCAmelCase , )
lowercase = vocab_size
lowercase = hidden_size
lowercase = num_hidden_layers
lowercase = num_attention_heads
lowercase = intermediate_size
lowercase = hidden_act
lowercase = hidden_dropout_prob
lowercase = attention_probs_dropout_prob
lowercase = max_position_embeddings
lowercase = type_vocab_size
lowercase = initializer_range
lowercase = layer_norm_eps
lowercase = embedding_size
lowercase = head_ratio
lowercase = conv_kernel_size
lowercase = num_groups
lowercase = classifier_dropout
class _A ( lowerCAmelCase ):
@property
def A__ ( self ):
"""simple docstring"""
if self.task == "multiple-choice":
lowercase = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
lowercase = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
("""token_type_ids""", dynamic_axis),
] )
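# --- Hedged sketch (added) ---
# The OnnxConfig above declares one dynamic-axis mapping per model input;
# multiple-choice inputs carry an extra "choice" axis. Standalone restatement:
from collections import OrderedDict


def onnx_inputs(task):
    if task == "multiple-choice":
        dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
    else:
        dynamic_axis = {0: "batch", 1: "sequence"}
    return OrderedDict(
        [
            ("input_ids", dynamic_axis),
            ("attention_mask", dynamic_axis),
            ("token_type_ids", dynamic_axis),
        ]
    )


print(onnx_inputs("sequence-classification"))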
| 32 | """simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
__lowerCAmelCase : List[Any] ={
"""configuration_swiftformer""": [
"""SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""SwiftFormerConfig""",
"""SwiftFormerOnnxConfig""",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : Optional[Any] =[
"""SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""SwiftFormerForImageClassification""",
"""SwiftFormerModel""",
"""SwiftFormerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
import sys
__lowerCAmelCase : Optional[Any] =_LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 32 | 1 |
"""simple docstring"""
import numpy as np
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
__lowerCAmelCase : Any = int(np.ceil((x_end - xa) / h ) )
__lowerCAmelCase : Union[str, Any] = np.zeros((n + 1,) )
__lowerCAmelCase : Union[str, Any] = ya
__lowerCAmelCase : List[str] = xa
for k in range(_UpperCamelCase ):
__lowerCAmelCase : Tuple = f(_UpperCamelCase , y[k] )
__lowerCAmelCase : List[str] = f(x + 0.5 * h , y[k] + 0.5 * h * ka )
__lowerCAmelCase : Union[str, Any] = f(x + 0.5 * h , y[k] + 0.5 * h * ka )
__lowerCAmelCase : Optional[Any] = f(x + h , y[k] + h * ka )
__lowerCAmelCase : Optional[int] = y[k] + (1 / 6) * h * (ka + 2 * ka + 2 * ka + ka)
x += h
return y
if __name__ == "__main__":
import doctest
doctest.testmod() | 86 |
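# --- Hedged sketch (added) ---
# Worked example for the fourth-order Runge-Kutta step above, applied to
# y' = y with y(0) = 1; the value at x = 1 should approach e ~= 2.71828.
# Self-contained restatement so the parameter order is unambiguous.
import math

import numpy as np


def rk4(f, y0, x0, x_end, h):
    n = int(np.ceil((x_end - x0) / h))
    y = np.zeros(n + 1)
    y[0], x = y0, x0
    for k in range(n):
        k1 = f(x, y[k])
        k2 = f(x + 0.5 * h, y[k] + 0.5 * h * k1)
        k3 = f(x + 0.5 * h, y[k] + 0.5 * h * k2)
        k4 = f(x + h, y[k] + h * k3)
        y[k + 1] = y[k] + (h / 6) * (k1 + 2 * k2 + 2 * k3 + k4)
        x += h
    return y


print(rk4(lambda x, y: y, 1.0, 0.0, 1.0, 0.01)[-1], math.e)  # both ~2.71828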
"""simple docstring"""
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
return round(float(moles / volume ) * nfactor )
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
return round(float((moles * 0.0821 * temperature) / (volume) ) )
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
return round(float((moles * 0.0821 * temperature) / (pressure) ) )
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
return round(float((pressure * volume) / (0.0821 * moles) ) )
if __name__ == "__main__":
import doctest
doctest.testmod() | 86 | 1 |
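# --- Hedged worked example (added) ---
# PV = nRT with R = 0.0821 L*atm/(mol*K), matching the helpers above: 2 mol at
# 300 K in a 10 L vessel exerts round(2 * 0.0821 * 300 / 10) ~= round(4.93) = 5 atm.
print(round(float((2 * 0.0821 * 300) / 10)))  # 5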
"""simple docstring"""
from __future__ import annotations
def a__ ( _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
create_state_space_tree(_SCREAMING_SNAKE_CASE , [] , 0 , [0 for i in range(len(_SCREAMING_SNAKE_CASE ) )] )
def a__ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
if index == len(_SCREAMING_SNAKE_CASE ):
print(_SCREAMING_SNAKE_CASE )
return
for i in range(len(_SCREAMING_SNAKE_CASE ) ):
if not index_used[i]:
current_sequence.append(sequence[i] )
UpperCamelCase = True
create_state_space_tree(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , index + 1 , _SCREAMING_SNAKE_CASE )
current_sequence.pop()
UpperCamelCase = False
lowerCAmelCase__ = [3, 1, 2, 4]
generate_all_permutations(sequence)
lowerCAmelCase__ = ['''A''', '''B''', '''C''']
generate_all_permutations(sequence_a)
| 351 |
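# --- Hedged sanity check (added) ---
# The backtracking routine above enumerates all n! orderings; for the 4-element
# sequence [3, 1, 2, 4] it prints 24 permutations, the same count itertools gives:
from itertools import permutations

assert len(list(permutations([3, 1, 2, 4]))) == 24  # 4! orderings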
"""simple docstring"""
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def a__ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = True , _SCREAMING_SNAKE_CASE = math.inf , _SCREAMING_SNAKE_CASE = -math.inf , _SCREAMING_SNAKE_CASE = math.inf , _SCREAMING_SNAKE_CASE = -math.inf , _SCREAMING_SNAKE_CASE = False , _SCREAMING_SNAKE_CASE = 100 , _SCREAMING_SNAKE_CASE = 0.01 , _SCREAMING_SNAKE_CASE = 1 , ):
"""simple docstring"""
UpperCamelCase = False
UpperCamelCase = search_prob
UpperCamelCase = start_temperate
UpperCamelCase = []
UpperCamelCase = 0
UpperCamelCase = None
while not search_end:
UpperCamelCase = current_state.score()
if best_state is None or current_score > best_state.score():
UpperCamelCase = current_state
scores.append(_SCREAMING_SNAKE_CASE )
iterations += 1
UpperCamelCase = None
UpperCamelCase = current_state.get_neighbors()
while (
next_state is None and neighbors
): # till we do not find a neighbor that we can move to
UpperCamelCase = random.randint(0 , len(_SCREAMING_SNAKE_CASE ) - 1 ) # picking a random neighbor
UpperCamelCase = neighbors.pop(_SCREAMING_SNAKE_CASE )
UpperCamelCase = picked_neighbor.score() - current_score
if (
picked_neighbor.x > max_x
or picked_neighbor.x < min_x
or picked_neighbor.y > max_y
or picked_neighbor.y < min_y
):
continue # neighbor outside our bounds
if not find_max:
UpperCamelCase = change * -1 # in case we are finding minimum
if change > 0: # improves the solution
UpperCamelCase = picked_neighbor
else:
UpperCamelCase = (math.e) ** (
change / current_temp
) # probability generation function
if random.random() < probability: # random number within probability
UpperCamelCase = picked_neighbor
UpperCamelCase = current_temp - (current_temp * rate_of_decrease)
if current_temp < threshold_temp or next_state is None:
# temperature below threshold, or could not find a suitable neighbor
UpperCamelCase = True
else:
UpperCamelCase = next_state
if visualization:
from matplotlib import pyplot as plt
plt.plot(range(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
plt.xlabel("Iterations" )
plt.ylabel("Function values" )
plt.show()
return best_state
if __name__ == "__main__":
def a__ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return (x**2) + (y**2)
# starting the problem with initial coordinates (12, 47)
lowerCAmelCase__ = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
lowerCAmelCase__ = simulated_annealing(
prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
)
print(
'''The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 '''
f'''and 50 > y > - 5 found via simulated annealing: {local_min.score()}'''
)
# starting the problem with initial coordinates (12, 47)
lowerCAmelCase__ = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
lowerCAmelCase__ = simulated_annealing(
prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
)
print(
'''The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 '''
f'''and 50 > y > - 5 found via simulated annealing: {local_min.score()}'''
)
def a__ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return (3 * x**2) - (6 * y)
lowerCAmelCase__ = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
lowerCAmelCase__ = simulated_annealing(prob, find_max=False, visualization=True)
print(
'''The minimum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: '''
f'''{local_min.score()}'''
)
lowerCAmelCase__ = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
lowerCAmelCase__ = simulated_annealing(prob, find_max=True, visualization=True)
print(
'''The maximum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: '''
f'''{local_min.score()}'''
)
| 244 | 0 |
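# --- Hedged illustration (added) ---
# The annealer above takes a worsening move with Metropolis-style probability
# p = e ** (change / T); that probability shrinks as the temperature decays.
import math

for temp in (100.0, 10.0, 1.0):
    change = -5.0  # a move that worsens the score by 5
    print(temp, math.e ** (change / temp))  # ~0.951, ~0.607, ~0.007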
'''simple docstring'''
import flax.linen as nn
import jax
import jax.numpy as jnp
class _a ( nn.Module ):
'''simple docstring'''
A : int
A : jnp.dtype = jnp.floataa
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = nn.Conv(
self.out_channels, kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype, )
def __call__( self, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Dict = hidden_states.shape
SCREAMING_SNAKE_CASE : List[str] = jax.image.resize(
A, shape=(batch, height * 2, width * 2, channels), method='nearest', )
SCREAMING_SNAKE_CASE : str = self.conv(A )
return hidden_states
class _a ( nn.Module ):
'''simple docstring'''
A : int
A : jnp.dtype = jnp.floataa
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = nn.Conv(
self.out_channels, kernel_size=(3, 3), strides=(2, 2), padding=((1, 1), (1, 1)), dtype=self.dtype, )
def __call__( self, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = self.conv(A )
return hidden_states
class _a ( nn.Module ):
'''simple docstring'''
A : int
A : int = None
A : float = 0.0
A : bool = None
A : jnp.dtype = jnp.floataa
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = self.in_channels if self.out_channels is None else self.out_channels
SCREAMING_SNAKE_CASE : Optional[int] = nn.GroupNorm(num_groups=32, epsilon=1E-5 )
SCREAMING_SNAKE_CASE : Tuple = nn.Conv(
A, kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype, )
SCREAMING_SNAKE_CASE : Union[str, Any] = nn.Dense(A, dtype=self.dtype )
SCREAMING_SNAKE_CASE : Union[str, Any] = nn.GroupNorm(num_groups=32, epsilon=1E-5 )
SCREAMING_SNAKE_CASE : Union[str, Any] = nn.Dropout(self.dropout_prob )
SCREAMING_SNAKE_CASE : Union[str, Any] = nn.Conv(
A, kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype, )
SCREAMING_SNAKE_CASE : Optional[Any] = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut
SCREAMING_SNAKE_CASE : List[str] = None
if use_nin_shortcut:
SCREAMING_SNAKE_CASE : Optional[Any] = nn.Conv(
A, kernel_size=(1, 1), strides=(1, 1), padding='VALID', dtype=self.dtype, )
def __call__( self, A, A, A=True ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = hidden_states
SCREAMING_SNAKE_CASE : str = self.norma(A )
SCREAMING_SNAKE_CASE : Dict = nn.swish(A )
SCREAMING_SNAKE_CASE : Optional[Any] = self.conva(A )
SCREAMING_SNAKE_CASE : Union[str, Any] = self.time_emb_proj(nn.swish(A ) )
SCREAMING_SNAKE_CASE : int = jnp.expand_dims(jnp.expand_dims(A, 1 ), 1 )
SCREAMING_SNAKE_CASE : Optional[int] = hidden_states + temb
SCREAMING_SNAKE_CASE : List[str] = self.norma(A )
SCREAMING_SNAKE_CASE : Union[str, Any] = nn.swish(A )
SCREAMING_SNAKE_CASE : List[Any] = self.dropout(A, A )
SCREAMING_SNAKE_CASE : List[str] = self.conva(A )
if self.conv_shortcut is not None:
SCREAMING_SNAKE_CASE : Union[str, Any] = self.conv_shortcut(A )
return hidden_states + residual
| 251 |
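# --- Hedged sketch (added) ---
# The upsample block above doubles the spatial dims with nearest-neighbor
# resizing before its 3x3 conv. Standalone check of that resize step, assuming
# jax is installed (NHWC layout, as in the module):
import jax
import jax.numpy as jnp

x = jnp.ones((1, 8, 8, 4))  # (batch, height, width, channels)
b, h, w, c = x.shape
print(jax.image.resize(x, shape=(b, h * 2, w * 2, c), method="nearest").shape)  # (1, 16, 16, 4)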
'''simple docstring'''
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
UpperCamelCase_ = (
string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
UpperCamelCase_ = [ord(letter) for letter in string.ascii_lowercase]
UpperCamelCase_ = {ord(char) for char in VALID_CHARS}
UpperCamelCase_ = ["the", "be", "to", "of", "and", "in", "that", "have"]
def lowercase__( __UpperCamelCase: list[int] ,__UpperCamelCase: tuple[int, ...] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = ""
SCREAMING_SNAKE_CASE : int
SCREAMING_SNAKE_CASE : int
SCREAMING_SNAKE_CASE : int
for keychar, cipherchar in zip(cycle(__UpperCamelCase ) ,__UpperCamelCase ):
SCREAMING_SNAKE_CASE : Any = cipherchar ^ keychar
if decodedchar not in VALID_INTS:
return None
decoded += chr(__UpperCamelCase )
return decoded
def lowercase__( __UpperCamelCase: list[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : list[str] = []
for key in product(__UpperCamelCase ,repeat=3 ):
SCREAMING_SNAKE_CASE : Union[str, Any] = try_key(__UpperCamelCase ,__UpperCamelCase )
if encoded is not None:
possibles.append(__UpperCamelCase )
return possibles
def lowercase__( __UpperCamelCase: list[str] ,__UpperCamelCase: str ):
"""simple docstring"""
return [possible for possible in possibles if common_word in possible.lower()]
def lowercase__( __UpperCamelCase: str = "p059_cipher.txt" ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : list[int]
SCREAMING_SNAKE_CASE : list[str]
SCREAMING_SNAKE_CASE : str
SCREAMING_SNAKE_CASE : str
SCREAMING_SNAKE_CASE : str = Path(__UpperCamelCase ).parent.joinpath(__UpperCamelCase ).read_text(encoding='utf-8' )
SCREAMING_SNAKE_CASE : Optional[int] = [int(__UpperCamelCase ) for number in data.strip().split(',' )]
SCREAMING_SNAKE_CASE : List[Any] = filter_valid_chars(__UpperCamelCase )
for common_word in COMMON_WORDS:
SCREAMING_SNAKE_CASE : Optional[Any] = filter_common_word(__UpperCamelCase ,__UpperCamelCase )
if len(__UpperCamelCase ) == 1:
break
SCREAMING_SNAKE_CASE : Dict = possibles[0]
return sum(ord(__UpperCamelCase ) for char in decoded_text )
if __name__ == "__main__":
print(F"""{solution() = }""")
| 251 | 1 |
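# --- Hedged round-trip check (added) ---
# The Project Euler solver above inverts a repeating-key XOR; encoding and
# decoding with the same key cancel out. The key and plaintext here are
# hypothetical, not the puzzle's actual answer.
from itertools import cycle

key = [ord(c) for c in "abc"]
plain = "the quick brown fox"
cipher = [ord(c) ^ k for c, k in zip(plain, cycle(key))]
assert "".join(chr(c ^ k) for c, k in zip(cipher, cycle(key))) == plain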
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
a : Tuple = logging.get_logger(__name__)
a : Optional[int] = {
"""Salesforce/instruct-blip-flan-t5""": """https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json""",
}
class UpperCamelCase_ ( __magic_name__ ):
lowercase = 'instructblip_vision_model'
def __init__( self , A=1408 , A=6144 , A=39 , A=16 , A=224 , A=14 , A="gelu" , A=1e-6 , A=0.0 , A=1e-10 , A=True , **A , ) -> List[Any]:
super().__init__(**A )
UpperCAmelCase : Optional[Any] = hidden_size
UpperCAmelCase : Dict = intermediate_size
UpperCAmelCase : Tuple = num_hidden_layers
UpperCAmelCase : Any = num_attention_heads
UpperCAmelCase : Optional[Any] = patch_size
UpperCAmelCase : List[Any] = image_size
UpperCAmelCase : Union[str, Any] = initializer_range
UpperCAmelCase : Optional[int] = attention_dropout
UpperCAmelCase : List[str] = layer_norm_eps
UpperCAmelCase : Optional[int] = hidden_act
UpperCAmelCase : List[Any] = qkv_bias
@classmethod
def _lowercase( cls , A , **A ) -> "PretrainedConfig":
cls._set_token_in_kwargs(A )
UpperCAmelCase : Union[str, Any] = cls.get_config_dict(A , **A )
# get the vision config dict if we are loading from InstructBlipConfig
if config_dict.get("""model_type""" ) == "instructblip":
UpperCAmelCase : int = config_dict["""vision_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(A , **A )
class UpperCamelCase_ ( __magic_name__ ):
lowercase = 'instructblip_qformer'
def __init__( self , A=30522 , A=768 , A=12 , A=12 , A=3072 , A="gelu" , A=0.1 , A=0.1 , A=512 , A=0.0_2 , A=1e-12 , A=0 , A="absolute" , A=2 , A=1408 , **A , ) -> Any:
super().__init__(pad_token_id=A , **A )
UpperCAmelCase : Union[str, Any] = vocab_size
UpperCAmelCase : Tuple = hidden_size
UpperCAmelCase : Tuple = num_hidden_layers
UpperCAmelCase : Dict = num_attention_heads
UpperCAmelCase : str = hidden_act
UpperCAmelCase : Optional[Any] = intermediate_size
UpperCAmelCase : Tuple = hidden_dropout_prob
UpperCAmelCase : Union[str, Any] = attention_probs_dropout_prob
UpperCAmelCase : Any = max_position_embeddings
UpperCAmelCase : Any = initializer_range
UpperCAmelCase : int = layer_norm_eps
UpperCAmelCase : List[str] = position_embedding_type
UpperCAmelCase : Any = cross_attention_frequency
UpperCAmelCase : int = encoder_hidden_size
@classmethod
def _lowercase( cls , A , **A ) -> "PretrainedConfig":
cls._set_token_in_kwargs(A )
UpperCAmelCase : Optional[int] = cls.get_config_dict(A , **A )
# get the qformer config dict if we are loading from InstructBlipConfig
if config_dict.get("""model_type""" ) == "instructblip":
UpperCAmelCase : Dict = config_dict["""qformer_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(A , **A )
class UpperCamelCase_ ( __magic_name__ ):
lowercase = 'instructblip'
lowercase = True
def __init__( self , A=None , A=None , A=None , A=32 , **A ) -> Optional[Any]:
super().__init__(**A )
if vision_config is None:
UpperCAmelCase : Tuple = {}
logger.info("""vision_config is None. initializing the InstructBlipVisionConfig with default values.""" )
if qformer_config is None:
UpperCAmelCase : int = {}
logger.info("""qformer_config is None. Initializing the InstructBlipQFormerConfig with default values.""" )
if text_config is None:
UpperCAmelCase : Any = {}
logger.info("""text_config is None. Initializing the text config with default values (`OPTConfig`).""" )
UpperCAmelCase : int = InstructBlipVisionConfig(**A )
UpperCAmelCase : int = InstructBlipQFormerConfig(**A )
UpperCAmelCase : Optional[int] = text_config["""model_type"""] if """model_type""" in text_config else """opt"""
UpperCAmelCase : Optional[Any] = CONFIG_MAPPING[text_model_type](**A )
UpperCAmelCase : int = self.text_config.tie_word_embeddings
UpperCAmelCase : str = self.text_config.is_encoder_decoder
UpperCAmelCase : str = num_query_tokens
UpperCAmelCase : Optional[int] = self.vision_config.hidden_size
UpperCAmelCase : Tuple = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
UpperCAmelCase : Optional[int] = 1.0
UpperCAmelCase : Optional[Any] = 0.0_2
@classmethod
def _lowercase( cls , A , A , A , **A , ) -> Any:
return cls(
vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **A , )
def _lowercase( self ) -> List[Any]:
UpperCAmelCase : int = copy.deepcopy(self.__dict__ )
UpperCAmelCase : Union[str, Any] = self.vision_config.to_dict()
UpperCAmelCase : Optional[int] = self.qformer_config.to_dict()
UpperCAmelCase : Optional[int] = self.text_config.to_dict()
UpperCAmelCase : int = self.__class__.model_type
return output
| 367 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/levit-128S": "https://huggingface.co/facebook/levit-128S/resolve/main/config.json",
    # See all LeViT models at https://huggingface.co/models?filter=levit
}
class LevitConfig(PretrainedConfig):
    model_type = "levit"

    def __init__(self, image_size=224, num_channels=3, kernel_size=3, stride=2, padding=1, patch_size=16, hidden_sizes=[128, 256, 384], num_attention_heads=[4, 8, 12], depths=[4, 4, 4], key_dim=[16, 16, 16], drop_path_rate=0, mlp_ratio=[2, 2, 2], attention_ratio=[2, 2, 2], initializer_range=0.02, **kwargs):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.hidden_sizes = hidden_sizes
        self.num_attention_heads = num_attention_heads
        self.depths = depths
        self.key_dim = key_dim
        self.drop_path_rate = drop_path_rate
        self.patch_size = patch_size
        self.attention_ratio = attention_ratio
        self.mlp_ratio = mlp_ratio
        self.initializer_range = initializer_range
        self.down_ops = [
            ["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
            ["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
        ]
class LevitOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
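# A quick sketch of the derived `down_ops`, assuming the default
# hidden_sizes=[128, 256, 384] and key_dim=[16, 16, 16]:
#
#   config = LevitConfig()
#   config.down_ops
#   # -> [["Subsample", 16, 8, 4, 2, 2],    # 128 // 16 == 8
#   #     ["Subsample", 16, 16, 4, 2, 2]]   # 256 // 16 == 16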
| 338 | 0 |
import os
import jsonlines
import numpy as np
from tqdm import tqdm
DOC_STRIDE = 2048
MAX_LENGTH = 4096
SEED = 42
PROCESS_TRAIN = os.environ.pop("PROCESS_TRAIN", "false")
CATEGORY_MAPPING = {"null": 0, "short": 1, "long": 2, "yes": 3, "no": 4}
def _get_single_answer(example):
    def choose_first(answer, is_long_answer=False):
        assert isinstance(answer, list)
        if len(answer) == 1:
            answer = answer[0]
            return {k: [answer[k]] for k in answer} if is_long_answer else answer
        for a in answer:
            if is_long_answer:
                a = {k: [a[k]] for k in a}
            if len(a["start_token"]) > 0:
                break
        return a

    answer = {"id": example["id"]}
    annotation = example["annotations"]
    yes_no_answer = annotation["yes_no_answer"]
    if 0 in yes_no_answer or 1 in yes_no_answer:
        answer["category"] = ["yes"] if 1 in yes_no_answer else ["no"]
        answer["start_token"] = answer["end_token"] = []
        answer["start_byte"] = answer["end_byte"] = []
        answer["text"] = ["<cls>"]
    else:
        answer["category"] = ["short"]
        out = choose_first(annotation["short_answers"])
        if len(out["start_token"]) == 0:
            # answer will be long if short is not available
            answer["category"] = ["long"]
            out = choose_first(annotation["long_answer"], is_long_answer=True)
            out["text"] = []
        answer.update(out)

    # disregard some samples
    if len(answer["start_token"]) > 1 or answer["start_token"] == answer["end_token"]:
        answer["remove_it"] = True
    else:
        answer["remove_it"] = False

    cols = ["start_token", "end_token", "start_byte", "end_byte", "text"]
    if not all(isinstance(answer[k], list) for k in cols):
        raise ValueError("Issue in ID", example["id"])

    return answer
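# A minimal sketch of the dict `_get_single_answer` returns for a yes/no
# annotated example (values are illustrative, not from a real sample):
#
#   {
#       "id": "some-example-id",
#       "category": ["yes"],
#       "start_token": [], "end_token": [],
#       "start_byte": [], "end_byte": [],
#       "text": ["<cls>"],
#       "remove_it": False,
#   }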
def get_context_and_ans(example, assertion=False):
    """Gives the new context after removing <html> tokens and the answer span re-indexed to that context."""
    answer = _get_single_answer(example)
    # bytes are of no use
    del answer["start_byte"]
    del answer["end_byte"]

    # handle yes_no answers explicitly
    if answer["category"][0] in ["yes", "no"]:  # category is list with one element
        doc = example["document"]["tokens"]
        context = []
        for i in range(len(doc["token"])):
            if not doc["is_html"][i]:
                context.append(doc["token"][i])
        return {
            "context": " ".join(context),
            "answer": {
                "start_token": -100,  # ignore index in cross-entropy
                "end_token": -100,  # ignore index in cross-entropy
                "category": answer["category"],
                "span": answer["category"],  # extra
            },
        }

    # later, help in removing all no answers
    if answer["start_token"] == [-1]:
        return {
            "context": "None",
            "answer": {
                "start_token": -1,
                "end_token": -1,
                "category": "null",
                "span": "None",  # extra
            },
        }

    # handling normal samples
    cols = ["start_token", "end_token"]
    answer.update({k: answer[k][0] if len(answer[k]) > 0 else answer[k] for k in cols})  # e.g. [10] == 10

    doc = example["document"]["tokens"]
    start_token = answer["start_token"]
    end_token = answer["end_token"]

    context = []
    for i in range(len(doc["token"])):
        if not doc["is_html"][i]:
            context.append(doc["token"][i])
        else:
            if answer["start_token"] > i:
                start_token -= 1
            if answer["end_token"] > i:
                end_token -= 1
    new = " ".join(context[start_token:end_token])

    # checking above code
    if assertion:
        is_html = doc["is_html"][answer["start_token"] : answer["end_token"]]
        old = doc["token"][answer["start_token"] : answer["end_token"]]
        old = " ".join([old[i] for i in range(len(old)) if not is_html[i]])
        if new != old:
            print("ID:", example["id"])
            print("New:", new, end="\n")
            print("Old:", old, end="\n\n")

    return {
        "context": " ".join(context),
        "answer": {
            "start_token": start_token,
            "end_token": end_token - 1,  # this makes it inclusive
            "category": answer["category"],  # either long or short
            "span": new,  # extra
        },
    }
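# Worked mini-example of the index shifting above (illustrative, not from the
# dataset): if doc["token"] is ["<p>", "hello", "world", "</p>"] with is_html
# [True, False, False, True] and the answer spans tokens (1, 3), the single
# HTML token before the span shifts (start_token, end_token) from (1, 3) to
# (0, 2) in the HTML-free context ["hello", "world"].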
def get_strided_contexts_and_ans(example, tokenizer, doc_stride=2048, max_length=4096, assertion=True):
    # consecutive windows overlap by doc_stride - q_len tokens
    out = get_context_and_ans(example, assertion=assertion)
    answer = out["answer"]

    # later, removing these samples
    if answer["start_token"] == -1:
        return {
            "example_id": example["id"],
            "input_ids": [[-1]],
            "labels": {
                "start_token": [-1],
                "end_token": [-1],
                "category": ["null"],
            },
        }

    input_ids = tokenizer(example["question"]["text"], out["context"]).input_ids
    q_len = input_ids.index(tokenizer.sep_token_id) + 1

    # return yes/no
    if answer["category"][0] in ["yes", "no"]:  # category is list with one element
        inputs = []
        category = []
        q_indices = input_ids[:q_len]
        doc_start_indices = range(q_len, len(input_ids), max_length - doc_stride)
        for i in doc_start_indices:
            end_index = i + max_length - q_len
            slice = input_ids[i:end_index]
            inputs.append(q_indices + slice)
            category.append(answer["category"][0])
            if slice[-1] == tokenizer.sep_token_id:
                break
        return {
            "example_id": example["id"],
            "input_ids": inputs,
            "labels": {
                "start_token": [-100] * len(category),
                "end_token": [-100] * len(category),
                "category": category,
            },
        }

    splitted_context = out["context"].split()
    complete_end_token = splitted_context[answer["end_token"]]
    answer["start_token"] = len(
        tokenizer(
            " ".join(splitted_context[: answer["start_token"]]), add_special_tokens=False,
        ).input_ids
    )
    answer["end_token"] = len(
        tokenizer(" ".join(splitted_context[: answer["end_token"]]), add_special_tokens=False).input_ids
    )
    answer["start_token"] += q_len
    answer["end_token"] += q_len

    # fixing end token
    num_sub_tokens = len(tokenizer(complete_end_token, add_special_tokens=False).input_ids)
    if num_sub_tokens > 1:
        answer["end_token"] += num_sub_tokens - 1

    old = input_ids[answer["start_token"] : answer["end_token"] + 1]  # right & left are inclusive
    start_token = answer["start_token"]
    end_token = answer["end_token"]

    if assertion:
        new = tokenizer.decode(old)
        if answer["span"] != new:
            print("ISSUE IN TOKENIZATION")
            print("OLD:", answer["span"])
            print("NEW:", new, end="\n\n")

    if len(input_ids) <= max_length:
        return {
            "example_id": example["id"],
            "input_ids": [input_ids],
            "labels": {
                "start_token": [answer["start_token"]],
                "end_token": [answer["end_token"]],
                "category": answer["category"],
            },
        }

    q_indices = input_ids[:q_len]
    doc_start_indices = range(q_len, len(input_ids), max_length - doc_stride)

    inputs = []
    answers_start_token = []
    answers_end_token = []
    answers_category = []  # null, yes, no, long, short
    for i in doc_start_indices:
        end_index = i + max_length - q_len
        slice = input_ids[i:end_index]
        inputs.append(q_indices + slice)
        assert len(inputs[-1]) <= max_length, "Issue in truncating length"

        if start_token >= i and end_token <= end_index - 1:
            start_token = start_token - i + q_len
            end_token = end_token - i + q_len
            answers_category.append(answer["category"][0])  # ["short"] -> "short"
        else:
            start_token = -100
            end_token = -100
            answers_category.append("null")
        new = inputs[-1][start_token : end_token + 1]

        answers_start_token.append(start_token)
        answers_end_token.append(end_token)
        if assertion:
            if new != old and new != [tokenizer.cls_token_id]:
                print("ISSUE in strided for ID:", example["id"])
                print("New:", tokenizer.decode(new))
                print("Old:", tokenizer.decode(old), end="\n\n")
        if slice[-1] == tokenizer.sep_token_id:
            break

    return {
        "example_id": example["id"],
        "input_ids": inputs,
        "labels": {
            "start_token": answers_start_token,
            "end_token": answers_end_token,
            "category": answers_category,
        },
    }
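# Window arithmetic sketch for the strided chunking above (illustrative
# numbers): with max_length=4096, doc_stride=2048 and a question of q_len=96
# tokens, window starts are range(96, len(input_ids), 4096 - 2048), each
# window keeps max_length - q_len = 4000 document tokens, and consecutive
# windows overlap by doc_stride - q_len = 1952 tokens.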
def prepare_inputs(example, tokenizer, doc_stride=2048, max_length=4096, assertion=False):
    example = get_strided_contexts_and_ans(
        example, tokenizer, doc_stride=doc_stride, max_length=max_length, assertion=assertion,
    )
    return example
def save_to_disk(hf_data, file_name):
    with jsonlines.open(file_name, "a") as writer:
        for example in tqdm(hf_data, total=len(hf_data), desc="Saving samples ... "):
            labels = example["labels"]
            for ids, start, end, cat in zip(
                example["input_ids"], labels["start_token"], labels["end_token"], labels["category"],
            ):
                if start == -1 and end == -1:
                    continue  # skip samples that have no answer at all
                if cat == "null" and np.random.rand() < 0.6:
                    continue  # randomly drop ~60 % of the "null" samples
                writer.write(
                    {
                        "input_ids": ids,
                        "start_token": start,
                        "end_token": end,
                        "category": CATEGORY_MAPPING[cat],
                    }
                )
if __name__ == "__main__":
    from datasets import load_dataset
    from transformers import BigBirdTokenizer

    data = load_dataset("natural_questions")
    tokenizer = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
    data = data["train" if PROCESS_TRAIN == "true" else "validation"]
    fn_kwargs = {
        "tokenizer": tokenizer,
        "doc_stride": DOC_STRIDE,
        "max_length": MAX_LENGTH,
        "assertion": False,
    }
    data = data.map(prepare_inputs, fn_kwargs=fn_kwargs)
    data = data.remove_columns(["annotations", "document", "id", "question"])
    print(data)

    np.random.seed(SEED)
    cache_file_name = "nq-training.jsonl" if PROCESS_TRAIN == "true" else "nq-validation.jsonl"
    save_to_disk(data, file_name=cache_file_name)
| 19 |
DIGITS_SQUARED = [sum(int(c, 10) ** 2 for c in str(i)) for i in range(100_000)]


def next_number(number: int) -> int:
    """Returns the next number of the chain: the sum of the squares of the digits of `number`."""
    sum_of_digits_squared = 0
    while number:
        # Speeds things up slightly by looking up five digits at a time.
        sum_of_digits_squared += DIGITS_SQUARED[number % 100_000]
        number //= 100_000
    return sum_of_digits_squared


# There are two chains:
# one ends in 89, and its member 58 is seeded first so that classifying all
# the other members takes the fewest iterations;
# the other ends in 1 and contains only the single element 1.
# So 58 and 1 are declared before the search starts.
# Changed dictionary to an array to quicken the solution
CHAINS: list[bool | None] = [None] * 10_000_000
CHAINS[0] = True  # the chain starting at 1 ends in 1
CHAINS[57] = False  # the chain starting at 58 ends in 89


def chain(number: int) -> bool:
    if CHAINS[number - 1] is not None:
        return CHAINS[number - 1]  # type: ignore

    number_chain = chain(next_number(number))
    CHAINS[number - 1] = number_chain

    # appending zeros does not change the sum of squared digits,
    # so every power-of-ten multiple shares the same result
    while number < 10_000_000:
        CHAINS[number - 1] = number_chain
        number *= 10

    return number_chain


def solution(number: int = 10_000_000) -> int:
    """Counts how many starting numbers below `number` arrive at 89."""
    for i in range(1, number):
        if CHAINS[i] is None:
            chain(i + 1)
    return CHAINS[:number].count(False)
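# Illustrative worked example of a squared-digit chain:
#   44 -> 4**2 + 4**2 = 32 -> 13 -> 10 -> 1, so chain(44) is True (reaches 1);
#   85 -> 89 -> 145 -> 42 -> 20 -> 4 -> 16 -> 37 -> 58 -> 89 loops at 89,
# so chain(85) is False and 85 is one of the numbers counted by solution().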
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f'{solution() = }')
| 170 | 0 |
"""simple docstring"""
class TrieNode:
    def __init__(self):
        self.nodes: dict[str, "TrieNode"] = {}  # Mapping from char to TrieNode
        self.is_leaf = False

    def insert_many(self, words: list[str]) -> None:
        for word in words:
            self.insert(word)

    def insert(self, word: str) -> None:
        curr = self
        for char in word:
            if char not in curr.nodes:
                curr.nodes[char] = TrieNode()
            curr = curr.nodes[char]
        curr.is_leaf = True

    def find(self, word: str) -> bool:
        curr = self
        for char in word:
            if char not in curr.nodes:
                return False
            curr = curr.nodes[char]
        return curr.is_leaf

    def delete(self, word: str) -> None:
        def _delete(curr: "TrieNode", word: str, index: int) -> bool:
            if index == len(word):
                # If word does not exist
                if not curr.is_leaf:
                    return False
                curr.is_leaf = False
                return len(curr.nodes) == 0
            char = word[index]
            char_node = curr.nodes.get(char)
            # If char not in current trie node
            if not char_node:
                return False
            # Flag to check if node can be deleted
            delete_curr = _delete(char_node, word, index + 1)
            if delete_curr:
                del curr.nodes[char]
                return len(curr.nodes) == 0
            return delete_curr

        _delete(self, word, 0)
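# Pruning semantics sketch (illustrative): with "banana" and "bananas" both
# stored, delete("banana") only unsets is_leaf on the last 'a' node; nothing
# is pruned because that node still has the 's' child, so find("bananas")
# keeps working while find("banana") now returns False.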
def print_words(node: TrieNode, word: str) -> None:
    if node.is_leaf:
        print(word, end=" ")
    for key, value in node.nodes.items():
        print_words(value, word + key)


def test_trie() -> bool:
    words = "banana bananas bandana band apple all beast".split()
    root = TrieNode()
    root.insert_many(words)
    # print_words(root, "")
    assert all(root.find(word) for word in words)
    assert root.find("banana")
    assert not root.find("bandanas")
    assert not root.find("apps")
    assert root.find("apple")
    assert root.find("all")
    root.delete("all")
    assert not root.find("all")
    root.delete("banana")
    assert not root.find("banana")
    assert root.find("bananas")
    return True


def print_results(msg: str, passes: bool) -> None:
    print(str(msg), "works!" if passes else "doesn't work :(")


def pytests() -> None:
    assert test_trie()


def main() -> None:
    print_results("Testing trie functionality", test_trie())


if __name__ == "__main__":
    main()
| 350 |
"""simple docstring"""
import string
def atbash_slow(sequence: str) -> str:
    output = ""
    for i in sequence:
        extract = ord(i)
        if 65 <= extract <= 90:
            output += chr(155 - extract)
        elif 97 <= extract <= 122:
            output += chr(219 - extract)
        else:
            output += i
    return output


def atbash(sequence: str) -> str:
    letters = string.ascii_letters
    letters_reversed = string.ascii_lowercase[::-1] + string.ascii_uppercase[::-1]
    return "".join(
        letters_reversed[letters.index(c)] if c in letters else c for c in sequence
    )


def benchmark() -> None:
    from timeit import timeit

    print("Running performance benchmarks...")
    setup = "from string import printable ; from __main__ import atbash, atbash_slow"
    print(f"> atbash_slow(): {timeit('atbash_slow(printable)', setup=setup)} seconds")
    print(f"> atbash(): {timeit('atbash(printable)', setup=setup)} seconds")
if __name__ == "__main__":
for example in ("ABCDEFGH", "123GGjj", "testStringtest", "with space"):
print(f'''{example} encrypted in atbash: {atbash(example)}''')
benchmark()
| 259 | 0 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
# Dummy object that raises an informative error whenever the required
# backends are missing.
class SpectrogramDiffusionPipeline(metaclass=DummyObject):
    _backends = ["transformers", "torch", "note_seq"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["transformers", "torch", "note_seq"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["transformers", "torch", "note_seq"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["transformers", "torch", "note_seq"])
import copy
import inspect
import unittest
from transformers import PretrainedConfig, SwiftFormerConfig
from transformers.testing_utils import (
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwiftFormerForImageClassification, SwiftFormerModel
from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class SwiftFormerModelTester:
def __init__( self : str , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : int=1_3 , _lowerCAmelCase : Optional[int]=3 , _lowerCAmelCase : Any=True , _lowerCAmelCase : str=True , _lowerCAmelCase : Any=0.1 , _lowerCAmelCase : Tuple=0.1 , _lowerCAmelCase : Any=2_2_4 , _lowerCAmelCase : Any=1_0_0_0 , _lowerCAmelCase : Any=[3, 3, 6, 4] , _lowerCAmelCase : Any=[4_8, 5_6, 1_1_2, 2_2_0] , ) -> List[Any]:
"""simple docstring"""
snake_case_ = parent
snake_case_ = batch_size
snake_case_ = num_channels
snake_case_ = is_training
snake_case_ = use_labels
snake_case_ = hidden_dropout_prob
snake_case_ = attention_probs_dropout_prob
snake_case_ = num_labels
snake_case_ = image_size
snake_case_ = layer_depths
snake_case_ = embed_dims
def lowerCAmelCase__ ( self : Optional[int] ) -> List[Any]:
"""simple docstring"""
snake_case_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
snake_case_ = None
if self.use_labels:
snake_case_ = ids_tensor([self.batch_size] , self.num_labels )
snake_case_ = self.get_config()
return config, pixel_values, labels
def lowerCAmelCase__ ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
return SwiftFormerConfig(
depths=self.layer_depths , embed_dims=self.embed_dims , mlp_ratio=4 , downsamples=[True, True, True, True] , hidden_act="gelu" , num_labels=self.num_labels , down_patch_size=3 , down_stride=2 , down_pad=1 , drop_rate=0.0 , drop_path_rate=0.0 , use_layer_scale=_lowerCAmelCase , layer_scale_init_value=1e-5 , )
def lowerCAmelCase__ ( self : Union[str, Any] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Dict ) -> str:
"""simple docstring"""
snake_case_ = SwiftFormerModel(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
snake_case_ = model(_lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dims[-1], 7, 7) )
def lowerCAmelCase__ ( self : Tuple , _lowerCAmelCase : Tuple , _lowerCAmelCase : str , _lowerCAmelCase : int ) -> List[Any]:
"""simple docstring"""
snake_case_ = self.num_labels
snake_case_ = SwiftFormerForImageClassification(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
snake_case_ = model(_lowerCAmelCase , labels=_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
snake_case_ = SwiftFormerForImageClassification(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
snake_case_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
snake_case_ = model(_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCAmelCase__ ( self : int ) -> Any:
"""simple docstring"""
((snake_case_) , (snake_case_) , (snake_case_)) = self.prepare_config_and_inputs()
snake_case_ = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class SwiftFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": SwiftFormerModel, "image-classification": SwiftFormerForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
def lowerCAmelCase__ ( self : Optional[int] ) -> Any:
"""simple docstring"""
snake_case_ = SwiftFormerModelTester(self )
snake_case_ = ConfigTester(
self , config_class=_lowerCAmelCase , has_text_modality=_lowerCAmelCase , hidden_size=3_7 , num_attention_heads=1_2 , num_hidden_layers=1_2 , )
def lowerCAmelCase__ ( self : Any ) -> Optional[Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason="SwiftFormer does not use inputs_embeds" )
def lowerCAmelCase__ ( self : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
pass
def lowerCAmelCase__ ( self : List[str] ) -> List[str]:
"""simple docstring"""
snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case_ = model_class(_lowerCAmelCase )
snake_case_ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_lowerCAmelCase , nn.Linear ) )
def lowerCAmelCase__ ( self : List[str] ) -> Any:
"""simple docstring"""
snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case_ = model_class(_lowerCAmelCase )
snake_case_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case_ = [*signature.parameters.keys()]
snake_case_ = ["pixel_values"]
self.assertListEqual(arg_names[:1] , _lowerCAmelCase )
def lowerCAmelCase__ ( self : str ) -> Optional[Any]:
"""simple docstring"""
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCAmelCase )
def lowerCAmelCase__ ( self : str ) -> Any:
"""simple docstring"""
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_lowerCAmelCase )
@slow
def lowerCAmelCase__ ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case_ = SwiftFormerModel.from_pretrained(_lowerCAmelCase )
self.assertIsNotNone(_lowerCAmelCase )
@unittest.skip(reason="SwiftFormer does not output attentions" )
def lowerCAmelCase__ ( self : Any ) -> Tuple:
"""simple docstring"""
pass
def lowerCAmelCase__ ( self : List[Any] ) -> Optional[Any]:
"""simple docstring"""
def check_hidden_states_output(_lowerCAmelCase : List[Any] , _lowerCAmelCase : str , _lowerCAmelCase : Tuple ):
snake_case_ = model_class(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
with torch.no_grad():
snake_case_ = model(**self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase ) )
snake_case_ = outputs.hidden_states
snake_case_ = 8
self.assertEqual(len(_lowerCAmelCase ) , _lowerCAmelCase ) # TODO
# SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width)
# with the width and height being successively divided by 2, after every 2 blocks
for i in range(len(_lowerCAmelCase ) ):
self.assertEqual(
hidden_states[i].shape , torch.Size(
[
self.model_tester.batch_size,
self.model_tester.embed_dims[i // 2],
(self.model_tester.image_size // 4) // 2 ** (i // 2),
(self.model_tester.image_size // 4) // 2 ** (i // 2),
] ) , )
snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case_ = True
check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
snake_case_ = True
check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
def lowerCAmelCase__ ( self : Any ) -> Optional[int]:
"""simple docstring"""
def _config_zero_init(_lowerCAmelCase : List[str] ):
snake_case_ = copy.deepcopy(_lowerCAmelCase )
for key in configs_no_init.__dict__.keys():
if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key:
setattr(_lowerCAmelCase , _lowerCAmelCase , 1e-10 )
if isinstance(getattr(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) , _lowerCAmelCase ):
snake_case_ = _config_zero_init(getattr(_lowerCAmelCase , _lowerCAmelCase ) )
setattr(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
return configs_no_init
snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
snake_case_ = _config_zero_init(_lowerCAmelCase )
for model_class in self.all_model_classes:
snake_case_ = model_class(config=_lowerCAmelCase )
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9) / 1e9).round().item() , [0.0, 1.0] , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def lowerCAmelCase__ ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
pass
def _lowerCAmelCase ( )->str:
'''simple docstring'''
snake_case_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def lowerCAmelCase__ ( self : int ) -> Optional[int]:
"""simple docstring"""
return ViTImageProcessor.from_pretrained("MBZUAI/swiftformer-xs" ) if is_vision_available() else None
@slow
def lowerCAmelCase__ ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
snake_case_ = SwiftFormerForImageClassification.from_pretrained("MBZUAI/swiftformer-xs" ).to(_lowerCAmelCase )
snake_case_ = self.default_image_processor
snake_case_ = prepare_img()
snake_case_ = image_processor(images=_lowerCAmelCase , return_tensors="pt" ).to(_lowerCAmelCase )
# forward pass
with torch.no_grad():
snake_case_ = model(**_lowerCAmelCase )
# verify the logits
snake_case_ = torch.Size((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , _lowerCAmelCase )
snake_case_ = torch.tensor([[-2.1703e00, 2.1107e00, -2.0811e00]] ).to(_lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _lowerCAmelCase , atol=1e-4 ) )
| 159 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_megatron_bert": ["MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MegatronBertConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_megatron_bert"] = [
        "MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MegatronBertForCausalLM",
        "MegatronBertForMaskedLM",
        "MegatronBertForMultipleChoice",
        "MegatronBertForNextSentencePrediction",
        "MegatronBertForPreTraining",
        "MegatronBertForQuestionAnswering",
        "MegatronBertForSequenceClassification",
        "MegatronBertForTokenClassification",
        "MegatronBertModel",
        "MegatronBertPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_megatron_bert import MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MegatronBertConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_megatron_bert import (
MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
MegatronBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
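# Note on the lazy-import pattern above (a sketch, not part of the original
# module): importing this package only builds `_import_structure`; the heavy
# torch modules are loaded on first attribute access, e.g.
#
#   from transformers.models.megatron_bert import MegatronBertModel
#
# triggers the real import, while `import transformers.models.megatron_bert`
# on its own stays cheap.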
| 350 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class __SCREAMING_SNAKE_CASE(BaseImageProcessor):
    model_input_names = ["pixel_values"]
def __init__( self , _UpperCamelCase = True , _UpperCamelCase = None , _UpperCamelCase = PILImageResampling.BILINEAR , _UpperCamelCase = True , _UpperCamelCase = None , _UpperCamelCase = True , _UpperCamelCase = 1 / 2_55 , _UpperCamelCase = True , _UpperCamelCase = None , _UpperCamelCase = None , **_UpperCamelCase , ):
"""simple docstring"""
super().__init__(**_UpperCamelCase )
lowerCAmelCase__ = size if size is not None else {'shortest_edge': 2_56}
lowerCAmelCase__ = get_size_dict(_UpperCamelCase , default_to_square=_UpperCamelCase )
lowerCAmelCase__ = crop_size if crop_size is not None else {'height': 2_24, 'width': 2_24}
lowerCAmelCase__ = get_size_dict(_UpperCamelCase )
lowerCAmelCase__ = do_resize
lowerCAmelCase__ = size
lowerCAmelCase__ = resample
lowerCAmelCase__ = do_center_crop
lowerCAmelCase__ = crop_size
lowerCAmelCase__ = do_rescale
lowerCAmelCase__ = rescale_factor
lowerCAmelCase__ = do_normalize
lowerCAmelCase__ = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
lowerCAmelCase__ = image_std if image_std is not None else IMAGENET_STANDARD_STD
def UpperCamelCase__ ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = PILImageResampling.BICUBIC , _UpperCamelCase = None , **_UpperCamelCase , ):
"""simple docstring"""
lowerCAmelCase__ = get_size_dict(_UpperCamelCase , default_to_square=_UpperCamelCase )
if "shortest_edge" not in size:
raise ValueError(F"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}" )
lowerCAmelCase__ = get_resize_output_image_size(_UpperCamelCase , size=size['shortest_edge'] , default_to_square=_UpperCamelCase )
return resize(_UpperCamelCase , size=_UpperCamelCase , resample=_UpperCamelCase , data_format=_UpperCamelCase , **_UpperCamelCase )
def UpperCamelCase__ ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = None , **_UpperCamelCase , ):
"""simple docstring"""
lowerCAmelCase__ = get_size_dict(_UpperCamelCase )
return center_crop(_UpperCamelCase , size=(size['height'], size['width']) , data_format=_UpperCamelCase , **_UpperCamelCase )
def UpperCamelCase__ ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = None , **_UpperCamelCase ):
"""simple docstring"""
return rescale(_UpperCamelCase , scale=_UpperCamelCase , data_format=_UpperCamelCase , **_UpperCamelCase )
def UpperCamelCase__ ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = None , **_UpperCamelCase , ):
"""simple docstring"""
return normalize(_UpperCamelCase , mean=_UpperCamelCase , std=_UpperCamelCase , data_format=_UpperCamelCase , **_UpperCamelCase )
def UpperCamelCase__ ( self , _UpperCamelCase , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = ChannelDimension.FIRST , **_UpperCamelCase , ):
"""simple docstring"""
lowerCAmelCase__ = do_resize if do_resize is not None else self.do_resize
lowerCAmelCase__ = size if size is not None else self.size
lowerCAmelCase__ = get_size_dict(_UpperCamelCase , default_to_square=_UpperCamelCase )
lowerCAmelCase__ = resample if resample is not None else self.resample
lowerCAmelCase__ = do_center_crop if do_center_crop is not None else self.do_center_crop
lowerCAmelCase__ = crop_size if crop_size is not None else self.crop_size
lowerCAmelCase__ = get_size_dict(_UpperCamelCase )
lowerCAmelCase__ = do_rescale if do_rescale is not None else self.do_rescale
lowerCAmelCase__ = rescale_factor if rescale_factor is not None else self.rescale_factor
lowerCAmelCase__ = do_normalize if do_normalize is not None else self.do_normalize
lowerCAmelCase__ = image_mean if image_mean is not None else self.image_mean
lowerCAmelCase__ = image_std if image_std is not None else self.image_std
lowerCAmelCase__ = make_list_of_images(_UpperCamelCase )
if not valid_images(_UpperCamelCase ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# All transformations expect numpy arrays.
lowerCAmelCase__ = [to_numpy_array(_UpperCamelCase ) for image in images]
if do_resize:
lowerCAmelCase__ = [self.resize(image=_UpperCamelCase , size=_UpperCamelCase , resample=_UpperCamelCase ) for image in images]
if do_center_crop:
lowerCAmelCase__ = [self.center_crop(image=_UpperCamelCase , size=_UpperCamelCase ) for image in images]
if do_rescale:
lowerCAmelCase__ = [self.rescale(image=_UpperCamelCase , scale=_UpperCamelCase ) for image in images]
if do_normalize:
lowerCAmelCase__ = [self.normalize(image=_UpperCamelCase , mean=_UpperCamelCase , std=_UpperCamelCase ) for image in images]
lowerCAmelCase__ = [to_channel_dimension_format(_UpperCamelCase , _UpperCamelCase ) for image in images]
lowerCAmelCase__ = {'pixel_values': images}
return BatchFeature(data=_UpperCamelCase , tensor_type=_UpperCamelCase )
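# A minimal usage sketch, assuming this BaseImageProcessor subclass is
# instantiable with its defaults ("cat.jpg" is a hypothetical file):
#
#   from PIL import Image
#   processor = __SCREAMING_SNAKE_CASE()  # resize shortest edge to 256, center-crop to 224
#   batch = processor(images=Image.open("cat.jpg"), return_tensors="np")
#   batch["pixel_values"].shape  # expected: (1, 3, 224, 224)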
| 122 | 0 |
"""simple docstring"""
import argparse
import re
import numpy as np
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SamConfig,
SamImageProcessor,
SamModel,
SamProcessor,
SamVisionConfig,
)
KEYS_TO_MODIFY_MAPPING = {
"""iou_prediction_head.layers.0""": """iou_prediction_head.proj_in""",
"""iou_prediction_head.layers.1""": """iou_prediction_head.layers.0""",
"""iou_prediction_head.layers.2""": """iou_prediction_head.proj_out""",
"""mask_decoder.output_upscaling.0""": """mask_decoder.upscale_conv1""",
"""mask_decoder.output_upscaling.1""": """mask_decoder.upscale_layer_norm""",
"""mask_decoder.output_upscaling.3""": """mask_decoder.upscale_conv2""",
"""mask_downscaling.0""": """mask_embed.conv1""",
"""mask_downscaling.1""": """mask_embed.layer_norm1""",
"""mask_downscaling.3""": """mask_embed.conv2""",
"""mask_downscaling.4""": """mask_embed.layer_norm2""",
"""mask_downscaling.6""": """mask_embed.conv3""",
"""point_embeddings""": """point_embed""",
"""pe_layer.positional_encoding_gaussian_matrix""": """shared_embedding.positional_embedding""",
"""image_encoder""": """vision_encoder""",
"""neck.0""": """neck.conv1""",
"""neck.1""": """neck.layer_norm1""",
"""neck.2""": """neck.conv2""",
"""neck.3""": """neck.layer_norm2""",
"""patch_embed.proj""": """patch_embed.projection""",
""".norm""": """.layer_norm""",
"""blocks""": """layers""",
}
def replace_keys(state_dict):
    model_state_dict = {}
    state_dict.pop("pixel_mean", None)
    state_dict.pop("pixel_std", None)
    output_hypernetworks_mlps_pattern = r".*.output_hypernetworks_mlps.(\d+).layers.(\d+).*"
    for key, value in state_dict.items():
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify, new_key)
        if re.match(output_hypernetworks_mlps_pattern, key):
            layer_nb = int(re.match(output_hypernetworks_mlps_pattern, key).group(2))
            if layer_nb == 0:
                key = key.replace("layers.0", "proj_in")
            elif layer_nb == 1:
                key = key.replace("layers.1", "layers.0")
            elif layer_nb == 2:
                key = key.replace("layers.2", "proj_out")
        model_state_dict[key] = value
    model_state_dict["shared_image_embedding.positional_embedding"] = model_state_dict[
        "prompt_encoder.shared_embedding.positional_embedding"
    ]
    return model_state_dict
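# Worked example of the renaming above (illustrative key): a checkpoint entry
# "mask_decoder.output_hypernetworks_mlps.0.layers.1.weight" matches the
# regex with group(2) == "1", so layer_nb == 1 and "layers.1" is rewritten to
# "layers.0"; the KEYS_TO_MODIFY_MAPPING substitutions have already been
# applied to the same key in the inner loop.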
def convert_sam_checkpoint(model_name, pytorch_dump_folder, push_to_hub, model_hub_id="ybelkada/segment-anything"):
    checkpoint_path = hf_hub_download(model_hub_id, f"checkpoints/{model_name}.pth")
if "sam_vit_b" in model_name:
lowerCAmelCase__ :List[str] = SamConfig()
elif "sam_vit_l" in model_name:
lowerCAmelCase__ :Any = SamVisionConfig(
hidden_size=1024 , num_hidden_layers=24 , num_attention_heads=16 , global_attn_indexes=[5, 11, 17, 23] , )
lowerCAmelCase__ :Optional[Any] = SamConfig(
vision_config=_SCREAMING_SNAKE_CASE , )
elif "sam_vit_h" in model_name:
lowerCAmelCase__ :str = SamVisionConfig(
hidden_size=1280 , num_hidden_layers=32 , num_attention_heads=16 , global_attn_indexes=[7, 15, 23, 31] , )
lowerCAmelCase__ :List[str] = SamConfig(
vision_config=_SCREAMING_SNAKE_CASE , )
lowerCAmelCase__ :Optional[int] = torch.load(_SCREAMING_SNAKE_CASE , map_location='cpu' )
lowerCAmelCase__ :Dict = replace_keys(_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ :List[str] = SamImageProcessor()
lowerCAmelCase__ :str = SamProcessor(image_processor=_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ :Optional[Any] = SamModel(_SCREAMING_SNAKE_CASE )
hf_model.load_state_dict(_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ :Dict = hf_model.to('cuda' )
lowerCAmelCase__ :Tuple = 'https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png'
lowerCAmelCase__ :str = Image.open(requests.get(_SCREAMING_SNAKE_CASE , stream=_SCREAMING_SNAKE_CASE ).raw ).convert('RGB' )
lowerCAmelCase__ :str = [[[400, 650]]]
lowerCAmelCase__ :Optional[int] = [[1]]
lowerCAmelCase__ :List[Any] = processor(images=np.array(_SCREAMING_SNAKE_CASE ) , return_tensors='pt' ).to('cuda' )
with torch.no_grad():
lowerCAmelCase__ :Any = hf_model(**_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ :Union[str, Any] = output.iou_scores.squeeze()
if model_name == "sam_vit_h_4b8939":
assert scores[-1].item() == 0.5_7_9_8_9_0_2_5_1_1_5_9_6_6_8
lowerCAmelCase__ :List[Any] = processor(
images=np.array(_SCREAMING_SNAKE_CASE ) , input_points=_SCREAMING_SNAKE_CASE , input_labels=_SCREAMING_SNAKE_CASE , return_tensors='pt' ).to('cuda' )
with torch.no_grad():
lowerCAmelCase__ :int = hf_model(**_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ :Union[str, Any] = output.iou_scores.squeeze()
assert scores[-1].item() == 0.9_7_1_2_6_0_3_0_9_2_1_9_3_6_0_4
lowerCAmelCase__ :Dict = ((75, 275, 1725, 850),)
lowerCAmelCase__ :Any = processor(images=np.array(_SCREAMING_SNAKE_CASE ) , input_boxes=_SCREAMING_SNAKE_CASE , return_tensors='pt' ).to('cuda' )
with torch.no_grad():
lowerCAmelCase__ :Any = hf_model(**_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ :Optional[int] = output.iou_scores.squeeze()
assert scores[-1].item() == 0.8_6_8_6_0_1_5_6_0_5_9_2_6_5_1_4
# Test with 2 points and 1 image.
lowerCAmelCase__ :List[str] = [[[400, 650], [800, 650]]]
lowerCAmelCase__ :Optional[int] = [[1, 1]]
lowerCAmelCase__ :List[str] = processor(
images=np.array(_SCREAMING_SNAKE_CASE ) , input_points=_SCREAMING_SNAKE_CASE , input_labels=_SCREAMING_SNAKE_CASE , return_tensors='pt' ).to('cuda' )
with torch.no_grad():
lowerCAmelCase__ :List[str] = hf_model(**_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ :int = output.iou_scores.squeeze()
assert scores[-1].item() == 0.9_9_3_6_0_4_7_7_9_2_4_3_4_6_9_2
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = ["sam_vit_b_01ec64", "sam_vit_h_4b8939", "sam_vit_l_0b3195"]
parser.add_argument(
"""--model_name""",
default="""sam_vit_h_4b8939""",
choices=choices,
type=str,
help="""Path to hf config.json of model to convert""",
)
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether to push the model and processor to the hub after converting""",
)
parser.add_argument(
"""--model_hub_id""",
default="""ybelkada/segment-anything""",
choices=choices,
type=str,
help="""Path to hf config.json of model to convert""",
)
    args = parser.parse_args()
convert_sam_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.model_hub_id)
| 293 |
"""simple docstring"""
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
__A = get_tests_dir("""fixtures/test_sentencepiece.model""")
@require_sentencepiece
@require_tokenizers
class XGLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XGLMTokenizer
    rust_tokenizer_class = XGLMTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
def snake_case ( self ):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
lowerCAmelCase__ :int = XGLMTokenizer(__UpperCAmelCase , keep_accents=__UpperCAmelCase )
tokenizer.save_pretrained(self.tmpdirname )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :List[Any] = '<pad>'
lowerCAmelCase__ :int = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__UpperCAmelCase ) , __UpperCAmelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__UpperCAmelCase ) , __UpperCAmelCase )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Optional[int] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<s>' )
self.assertEqual(vocab_keys[1] , '<pad>' )
self.assertEqual(len(__UpperCAmelCase ) , 1_0_0_8 )
def snake_case ( self ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 1_0_0_8 )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :List[Any] = XGLMTokenizer(__UpperCAmelCase , keep_accents=__UpperCAmelCase )
lowerCAmelCase__ :Optional[Any] = tokenizer.tokenize('This is a test' )
self.assertListEqual(__UpperCAmelCase , ['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__UpperCAmelCase ) , [value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]] , )
lowerCAmelCase__ :int = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
__UpperCAmelCase , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
lowerCAmelCase__ :Tuple = tokenizer.convert_tokens_to_ids(__UpperCAmelCase )
self.assertListEqual(
__UpperCAmelCase , [
value + tokenizer.fairseq_offset
for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4]
] , )
lowerCAmelCase__ :Optional[int] = tokenizer.convert_ids_to_tokens(__UpperCAmelCase )
self.assertListEqual(
__UpperCAmelCase , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
@cached_property
def snake_case ( self ):
'''simple docstring'''
return XGLMTokenizer.from_pretrained('facebook/xglm-564M' )
def snake_case ( self ):
'''simple docstring'''
with tempfile.NamedTemporaryFile() as f:
shutil.copyfile(__UpperCAmelCase , f.name )
lowerCAmelCase__ :Dict = XGLMTokenizer(f.name , keep_accents=__UpperCAmelCase )
lowerCAmelCase__ :List[Any] = pickle.dumps(__UpperCAmelCase )
pickle.loads(__UpperCAmelCase )
def snake_case ( self ):
'''simple docstring'''
if not self.test_rust_tokenizer:
return
lowerCAmelCase__ :Optional[Any] = self.get_tokenizer()
lowerCAmelCase__ :List[str] = self.get_rust_tokenizer()
lowerCAmelCase__ :Optional[Any] = 'I was born in 92000, and this is falsé.'
lowerCAmelCase__ :Dict = tokenizer.tokenize(__UpperCAmelCase )
lowerCAmelCase__ :Union[str, Any] = rust_tokenizer.tokenize(__UpperCAmelCase )
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
lowerCAmelCase__ :Optional[Any] = tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase )
lowerCAmelCase__ :List[Any] = rust_tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase )
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
lowerCAmelCase__ :int = self.get_rust_tokenizer()
lowerCAmelCase__ :Dict = tokenizer.encode(__UpperCAmelCase )
lowerCAmelCase__ :Tuple = rust_tokenizer.encode(__UpperCAmelCase )
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
@slow
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :str = 'Hello World!'
lowerCAmelCase__ :Tuple = [2, 3_1_2_2_7, 4_4_4_7, 3_5]
self.assertListEqual(__UpperCAmelCase , self.big_tokenizer.encode(__UpperCAmelCase ) )
@slow
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Tuple = (
'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
' add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth'
)
# fmt: off
lowerCAmelCase__ :List[str] = [2, 1_0_1_8, 6_7, 1_1, 1_9_8_8, 2_6_1_7, 5_6_3_1, 2_7_8, 1_1, 3_4_0_7, 4_8, 7_1_6_3_0, 2_8_0_8_5, 4, 3_2_3_4, 1_5_7, 1_3, 6, 5, 6, 4, 3_5_2_6, 7_6_8, 1_5, 6_5_9, 5_7, 2_9_8, 3_9_8_3, 8_6_4, 1_2_9, 2_1, 6, 5, 1_3_6_7_5, 3_7_7, 6_5_2, 7_5_8_0, 1_0_3_4_1, 1_5_5, 2_8_1_7, 4_2_2, 1_6_6_6, 7, 1_6_7_4, 5_3, 1_1_3, 2_0_2_2_7_7, 1_7_8_9_2, 3_3, 6_0, 8_7, 4, 3_2_3_4, 1_5_7, 6_1, 2_6_6_7, 5_2_3_7_6, 1_9, 8_8, 2_3, 7_3_5]
# fmt: on
self.assertListEqual(__UpperCAmelCase , self.big_tokenizer.encode(__UpperCAmelCase ) )
@slow
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Union[str, Any] = {
'input_ids': [[2, 1_0_8_8_2_5, 1_1_6_3, 1_5, 8_8_0_1_0, 4_7_3, 1_5_8_9_8, 1_5_7, 1_3_6_7_2, 1_8_5_7, 3_1_2, 8, 2_3_8_0_2_1, 1_1_6_3, 5_3, 1_3_6_7_2, 1_8_5_7, 3_1_2, 8, 5_3_2_8_3, 1_8_2_3_9_6, 8, 1_8_5_6_6, 1_6, 3_6_7_3_3, 4_1_0_1, 8, 2_3_0, 2_4_4_0_1_7, 1_2_2_5_5_3, 7, 1_5, 1_3_2_5_9_7, 4, 2_9_3, 1_2_5_1_1, 7_6_1_0, 4, 3_4_1_4, 1_3_2_5_9_7, 9, 4, 3_2_3_6_1, 3_6_2, 4, 7_3_4, 2_8_5_1_2, 3_2_5_6_9, 1_8, 4, 3_2_3_6_1, 2_6_0_9_6, 1_4_9_8_2, 7_3, 1_8_7_1_5, 2_1_4_3_3, 2_3_5_2_6_1, 1_5, 4_9_2, 1_2_4_2_7, 1_6, 5_3, 1_8_7_1_5, 2_1_4_3_3, 6_5_4_5_4, 1_5, 2_3_6_5_9, 5_6_3, 1_6, 2_7_8, 5_9_7, 2_8_4_3, 5_9_5, 7_9_3_1, 1_8_2_3_9_6, 6_4_1_8_6, 2_2, 8_8_6, 5_9_5, 1_3_2_9_8_1, 5_3, 2_5_5_4_0, 3_4_4_9, 4_3_9_8_2, 3_9_9_0_1, 5_9_5_1, 8_7_8, 3_3_0, 4, 2_7_6_9_4, 8_0_2_6_9, 3_1_2, 5_3, 6_5_1_7, 1_1_7_8_0, 6_1_1, 2_0_4_0_8, 5], [2, 6, 1_3_2_5_9_7, 6_7, 4_2_8_9_7, 3_3, 5_9_2, 8, 1_6_3_7_2_9, 2_5_5_4_0, 3_6_1, 1_3_6_9_9_7, 1_0_9_5_1_4, 1_7_3_2_3_0, 7, 5_0_1, 6_0, 1_0_2_9_1_3, 1_9_6, 5_6_3_1, 2_3_5, 6_3_2_4_3, 4_7_3, 6, 2_3_1_7_5_7, 7_4, 5_2_7_7, 7_9_0_5, 5_3, 3_0_9_5, 3_7_3_1_7, 2_2, 4_5_4, 1_8_3_8_7_4, 5], [2, 2_6_8, 3_1_2_9_8, 4_6_5_3_0, 6, 1_3_2_9_3_5, 4_3_8_3_1, 7, 5_9_7, 3_2, 2_4, 3_6_8_8, 9_8_6_5, 5]],
'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__UpperCAmelCase , model_name='facebook/xglm-564M' , padding=__UpperCAmelCase , )
| 293 | 1 |
import glob
import os
import random
from string import ascii_lowercase, digits
import cv2
LABEL_DIR = ""
IMAGE_DIR = ""
OUTPUT_DIR = ""
FLIP_TYPE = 1  # (0 is vertical, 1 is horizontal)
def main() -> None:
    img_paths, annos = get_dataset(LABEL_DIR, IMAGE_DIR)
    print("Processing...")
    new_images, new_annos, paths = update_image_and_anno(img_paths, annos, FLIP_TYPE)

    for index, image in enumerate(new_images):
        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32)
        file_name = paths[index].split(os.sep)[-1].rsplit(".", 1)[0]
        file_root = f"{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}"
        cv2.imwrite(f"{file_root}.jpg", image, [cv2.IMWRITE_JPEG_QUALITY, 85])
        print(f"Success {index + 1}/{len(new_images)} with {file_name}")
        annos_list = []
        for anno in new_annos[index]:
            obj = f"{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}"
            annos_list.append(obj)
        with open(f"{file_root}.txt", "w") as outfile:
            outfile.write("\n".join(line for line in annos_list))
def get_dataset(label_dir: str, img_dir: str) -> tuple[list, list]:
    """Collects image paths and their YOLO-format annotation boxes."""
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir, "*.txt")):
        label_name = label_file.split(os.sep)[-1].rsplit(".", 1)[0]
        with open(label_file) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir, f"{label_name}.jpg")
        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip("\n").split(" ")
            boxes.append(
                [
                    int(obj[0]),
                    float(obj[1]),
                    float(obj[2]),
                    float(obj[3]),
                    float(obj[4]),
                ]
            )
        if not boxes:
            continue
        img_paths.append(img_path)
        labels.append(boxes)
    return img_paths, labels
def update_image_and_anno(img_list: list, anno_list: list, flip_type: int = 1) -> tuple[list, list, list]:
    new_imgs_list = []
    new_annos_lists = []
    path_list = []
    for idx in range(len(img_list)):
        new_annos = []
        path = img_list[idx]
        path_list.append(path)
        img_annos = anno_list[idx]
        img = cv2.imread(path)
        if flip_type == 1:
            new_img = cv2.flip(img, flip_type)
            for bbox in img_annos:
                x_center_new = 1 - bbox[1]
                new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]])
        elif flip_type == 0:
            new_img = cv2.flip(img, flip_type)
            for bbox in img_annos:
                y_center_new = 1 - bbox[2]
                new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]])
        new_annos_lists.append(new_annos)
        new_imgs_list.append(new_img)
    return new_imgs_list, new_annos_lists, path_list
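# Flip math sketch (illustrative numbers): YOLO boxes are normalised to
# [0, 1], so a horizontal flip (flip_type == 1) maps x_center
# 0.25 -> 1 - 0.25 = 0.75 while y_center, width and height stay unchanged;
# a vertical flip (flip_type == 0) mirrors y_center the same way.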
def random_chars(number_char: int = 32) -> str:
    assert number_char > 1, "The number of characters should be greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code) for _ in range(number_char))
if __name__ == "__main__":
main()
print('''DONE ✅''')
| 370 |
def solution(limit: int = 50_000_000) -> int:
    """Counts the integers below `limit` expressible as a prime square plus a prime cube plus a prime fourth power."""
    ret = set()
    prime_square_limit = int((limit - 24) ** (1 / 2))

    primes = set(range(3, prime_square_limit + 1, 2))
    primes.add(2)
    for p in range(3, prime_square_limit + 1, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, prime_square_limit + 1, p)))

    for prime1 in primes:
        square = prime1 * prime1
        for prime2 in primes:
            cube = prime2 * prime2 * prime2
            if square + cube >= limit - 16:
                break
            for prime3 in primes:
                tetr = prime3 * prime3 * prime3 * prime3
                total = square + cube + tetr
                if total >= limit:
                    break
                ret.add(total)

    return len(ret)
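# Worked example for the smallest case (illustrative): 28 is the smallest
# number expressible as a prime square + cube + fourth power,
#   28 = 2**2 + 2**3 + 2**4 = 4 + 8 + 16,
# so 28 is one of the totals added to `ret` above.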
if __name__ == "__main__":
print(F'''{solution() = }''')
| 216 | 0 |
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def replace_key_with_offset(key: str, offset: int, original_name: str, new_name: str) -> str:
    """Replaces the key by subtracting the offset from the original layer number."""
    to_find = original_name.split(".")[0]
    key_list = key.split(".")
    orig_block_num = int(key_list[key_list.index(to_find) - 2])
    layer_num = int(key_list[key_list.index(to_find) - 1])
    new_block_num = orig_block_num - offset
    key = key.replace(f"{orig_block_num}.{layer_num}.{original_name}", f"block.{new_block_num}.{layer_num}.{new_name}")
    return key
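# Worked example (hypothetical key, mirroring the call sites below):
#   replace_key_with_offset("poolformer.encoder.2.0.mlp.fc1.weight", 1, "mlp.fc1", "output.conv1")
# finds orig_block_num=2 and layer_num=0 around "mlp", subtracts the offset,
# and returns "poolformer.encoder.block.1.0.output.conv1.weight".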
def rename_keys(state_dict):
    new_state_dict = OrderedDict()
    total_embed_found, patch_emb_offset = 0, 0
    for key, value in state_dict.items():
        if key.startswith("network"):
            key = key.replace("network", "poolformer.encoder")
        if "proj" in key:
            # Works for the first embedding as well as the internal embedding layers
            if key.endswith("bias") and "patch_embed" not in key:
                patch_emb_offset += 1
            to_replace = key[: key.find("proj")]
            key = key.replace(to_replace, f"patch_embeddings.{total_embed_found}.")
            key = key.replace("proj", "projection")
            if key.endswith("bias"):
                total_embed_found += 1
        if "patch_embeddings" in key:
            key = "poolformer.encoder." + key
        if "mlp.fc1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "mlp.fc1", "output.conv1")
        if "mlp.fc2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "mlp.fc2", "output.conv2")
        if "norm1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "norm1", "before_norm")
        if "norm2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "norm2", "after_norm")
        if "layer_scale_1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "layer_scale_1", "layer_scale_1")
        if "layer_scale_2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "layer_scale_2", "layer_scale_2")
        if "head" in key:
            key = key.replace("head", "classifier")
        new_state_dict[key] = value
    return new_state_dict
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image
@torch.no_grad()
def convert_poolformer_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path):
    config = PoolFormerConfig()

    # set attributes based on model_name
    repo_id = "huggingface/label-files"
    size = model_name[-3:]
    config.num_labels = 1000
    filename = "imagenet-1k-id2label.json"
    expected_shape = (1, 1000)

    # set config attributes
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
if size == "s12":
_SCREAMING_SNAKE_CASE = [2, 2, 6, 2]
_SCREAMING_SNAKE_CASE = [64, 128, 320, 512]
_SCREAMING_SNAKE_CASE = 4.0
_SCREAMING_SNAKE_CASE = 0.9
elif size == "s24":
_SCREAMING_SNAKE_CASE = [4, 4, 12, 4]
_SCREAMING_SNAKE_CASE = [64, 128, 320, 512]
_SCREAMING_SNAKE_CASE = 4.0
_SCREAMING_SNAKE_CASE = 0.9
elif size == "s36":
_SCREAMING_SNAKE_CASE = [6, 6, 18, 6]
_SCREAMING_SNAKE_CASE = [64, 128, 320, 512]
_SCREAMING_SNAKE_CASE = 4.0
_SCREAMING_SNAKE_CASE = 1e-6
_SCREAMING_SNAKE_CASE = 0.9
elif size == "m36":
_SCREAMING_SNAKE_CASE = [6, 6, 18, 6]
_SCREAMING_SNAKE_CASE = [96, 192, 384, 768]
_SCREAMING_SNAKE_CASE = 4.0
_SCREAMING_SNAKE_CASE = 1e-6
_SCREAMING_SNAKE_CASE = 0.95
elif size == "m48":
_SCREAMING_SNAKE_CASE = [8, 8, 24, 8]
_SCREAMING_SNAKE_CASE = [96, 192, 384, 768]
_SCREAMING_SNAKE_CASE = 4.0
_SCREAMING_SNAKE_CASE = 1e-6
_SCREAMING_SNAKE_CASE = 0.95
else:
raise ValueError(F'Size {size} not supported' )
# load image processor
_SCREAMING_SNAKE_CASE = PoolFormerImageProcessor(crop_pct=__lowerCamelCase )
# Prepare image
_SCREAMING_SNAKE_CASE = prepare_img()
_SCREAMING_SNAKE_CASE = image_processor(images=__lowerCamelCase , return_tensors="""pt""" ).pixel_values
logger.info(F'Converting model {model_name}...' )
# load original state dict
_SCREAMING_SNAKE_CASE = torch.load(__lowerCamelCase , map_location=torch.device("""cpu""" ) )
# rename keys
_SCREAMING_SNAKE_CASE = rename_keys(__lowerCamelCase )
# create HuggingFace model and load state dict
_SCREAMING_SNAKE_CASE = PoolFormerForImageClassification(__lowerCamelCase )
model.load_state_dict(__lowerCamelCase )
model.eval()
# Define image processor
_SCREAMING_SNAKE_CASE = PoolFormerImageProcessor(crop_pct=__lowerCamelCase )
_SCREAMING_SNAKE_CASE = image_processor(images=prepare_img() , return_tensors="""pt""" ).pixel_values
# forward pass
_SCREAMING_SNAKE_CASE = model(__lowerCamelCase )
_SCREAMING_SNAKE_CASE = outputs.logits
# define expected logit slices for different models
if size == "s12":
_SCREAMING_SNAKE_CASE = torch.tensor([-0.3045, -0.6758, -0.4869] )
elif size == "s24":
_SCREAMING_SNAKE_CASE = torch.tensor([0.4402, -0.1374, -0.8045] )
elif size == "s36":
_SCREAMING_SNAKE_CASE = torch.tensor([-0.6080, -0.5133, -0.5898] )
elif size == "m36":
_SCREAMING_SNAKE_CASE = torch.tensor([0.3952, 0.2263, -1.2668] )
elif size == "m48":
_SCREAMING_SNAKE_CASE = torch.tensor([0.1167, -0.0656, -0.3423] )
else:
raise ValueError(F'Size {size} not supported' )
# verify logits
assert logits.shape == expected_shape
assert torch.allclose(logits[0, :3] , __lowerCamelCase , atol=1e-2 )
# finally, save model and image processor
logger.info(F'Saving PyTorch model and image processor to {pytorch_dump_folder_path}...' )
Path(__lowerCamelCase ).mkdir(exist_ok=__lowerCamelCase )
model.save_pretrained(__lowerCamelCase )
print(F'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(__lowerCamelCase )
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser()
parser.add_argument(
"""--model_name""",
default="""poolformer_s12""",
type=str,
help="""Name of the model you'd like to convert.""",
)
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, help="""Path to the original PyTorch checkpoint (.pth file)."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
lowercase_ = parser.parse_args()
convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
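# Example invocation sketch (the local paths are hypothetical; the .pth.tar file
# comes from the original PoolFormer release):
#   python convert_poolformer_checkpoint.py \
#       --model_name poolformer_s12 \
#       --checkpoint_path ./poolformer_s12.pth.tar \
#       --pytorch_dump_folder_path ./poolformer_s12_hf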
| 58 |
"""Count token occurrences in a binarized dataset, used to smooth the MLM masking probabilities."""

import argparse
import logging
import pickle
from collections import Counter


logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
logger = logging.getLogger(__name__)

if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)"
    )
    parser.add_argument(
        "--data_file", type=str, default="data/dump.bert-base-uncased.pickle", help="The binarized dataset."
    )
    parser.add_argument(
        "--token_counts_dump", type=str, default="data/token_counts.bert-base-uncased.pickle", help="The dump file."
    )
    parser.add_argument("--vocab_size", default=30_522, type=int)
    args = parser.parse_args()

    logger.info(f"Loading data from {args.data_file}")
    with open(args.data_file, "rb") as fp:
        data = pickle.load(fp)

    logger.info("Counting occurrences for MLM.")
    counter = Counter()
    for tk_ids in data:
        counter.update(tk_ids)
    counts = [0] * args.vocab_size
    for k, v in counter.items():
        counts[k] = v

    logger.info(f"Dump to {args.token_counts_dump}")
    with open(args.token_counts_dump, "wb") as handle:
        pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
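# The dumped `counts` list maps token id -> corpus frequency; downstream
# distillation training scripts load it to smooth MLM masking probabilities.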
| 58 | 1 |
import math
from collections.abc import Iterator
from itertools import takewhile


def is_prime(number: int) -> bool:
    """Checks primality in O(sqrt(n)) trial divisions."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator() -> Iterator[int]:
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(limit: int = 2_000_000) -> int:
    """Returns the sum of all the primes below `limit`."""
    return sum(takewhile(lambda x: x < limit, prime_generator()))


if __name__ == "__main__":
    print(f"{solution() = }")
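# Quick sanity check with a small limit: the primes below 10 are 2, 3, 5 and 7,
# so solution(10) == 17.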
| 369 |
import sys

N = (
    "73167176531330624919225119674426574742355349194934"
    "96983520312774506326239578318016984801869478851843"
    "85861560789112949495459501737958331952853208805511"
    "12540698747158523863050715693290963295227443043557"
    "66896648950445244523161731856403098711121722383113"
    "62229893423380308135336276614282806444486645238749"
    "30358907296290491560440772390713810515859307960866"
    "70172427121883998797908792274921901699720888093776"
    "65727333001053367881220235421809751254540594752243"
    "52584907711670556013604839586446706324415722155397"
    "53697817977846174064955149290862569321978468622482"
    "83972241375657056057490261407972968652414535100474"
    "82166370484403199890008895243450658541227588666881"
    "16427171479924442928230863465674813919123162824586"
    "17866458359124566529476545682848912883142607690042"
    "24219022671055626321111109370544217506941658960408"
    "07198403850962455444362981230987879927244284909188"
    "84580156166097919133875499200524063689912560717606"
    "05886116467109405077541002256983155200055935729725"
    "71636269561882670428252483600823257530420752963450"
)


def solution(n: str = N) -> int:
    """Finds the thirteen adjacent digits in the digit string `n` that have the
    greatest product and returns that product."""
    largest_product = -sys.maxsize - 1
    for i in range(len(n) - 12):
        product = 1
        for j in range(13):
            product *= int(n[i + j])
        if product > largest_product:
            largest_product = product
    return largest_product


if __name__ == "__main__":
    print(f"{solution() = }")
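# For the full 1000-digit input above, the greatest product of thirteen
# adjacent digits is known to be 23514624000 (Project Euler problem 8).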
| 286 | 0 |
"""Jacobi iteration method for a strictly diagonally dominant system of linear equations."""

from __future__ import annotations

import numpy as np
from numpy import float64
from numpy.typing import NDArray


def jacobi_iteration_method(
    coefficient_matrix: NDArray[float64],
    constant_matrix: NDArray[float64],
    init_val: list[float],
    iterations: int,
) -> list[float]:
    """Iteratively solves Ax = b starting from `init_val`."""
    rows1, cols1 = coefficient_matrix.shape
    rows2, cols2 = constant_matrix.shape

    if rows1 != cols1:
        msg = f"Coefficient matrix dimensions must be nxn but received {rows1}x{cols1}"
        raise ValueError(msg)

    if cols2 != 1:
        msg = f"Constant matrix must be nx1 but received {rows2}x{cols2}"
        raise ValueError(msg)

    if rows1 != rows2:
        msg = (
            "Coefficient and constant matrices dimensions must be nxn and nx1 but "
            f"received {rows1}x{cols1} and {rows2}x{cols2}"
        )
        raise ValueError(msg)

    if len(init_val) != rows1:
        msg = (
            "Number of initial values must be equal to number of rows in coefficient "
            f"matrix but received {len(init_val)} and {rows1}"
        )
        raise ValueError(msg)

    if iterations <= 0:
        raise ValueError("Iterations must be at least 1")

    table: NDArray[float64] = np.concatenate((coefficient_matrix, constant_matrix), axis=1)
    rows, cols = table.shape

    strictly_diagonally_dominant(table)

    # Iterates the whole matrix for given number of times
    for _ in range(iterations):
        new_val = []
        for row in range(rows):
            temp = 0
            for col in range(cols):
                if col == row:
                    denom = table[row][col]
                elif col == cols - 1:
                    val = table[row][col]
                else:
                    temp += (-1) * table[row][col] * init_val[col]
            temp = (temp + val) / denom
            new_val.append(temp)
        init_val = new_val

    return [float(i) for i in new_val]


def strictly_diagonally_dominant(table: NDArray[float64]) -> bool:
    """Checks whether the augmented matrix is strictly diagonally dominant."""
    rows, cols = table.shape

    is_diagonally_dominant = True

    for i in range(0, rows):
        total = 0
        for j in range(0, cols - 1):
            if i == j:
                continue
            else:
                total += table[i][j]

        if table[i][i] <= total:
            raise ValueError("Coefficient matrix is not strictly diagonally dominant")

    return is_diagonally_dominant


# Test Cases
if __name__ == "__main__":
    import doctest

    doctest.testmod()
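# Usage sketch (a strictly diagonally dominant 3x3 system; values chosen for illustration):
# coefficient = np.array([[4.0, 1.0, 1.0], [1.0, 5.0, 2.0], [0.0, 1.0, 3.0]])
# constant = np.array([[2.0], [-6.0], [-4.0]])
# jacobi_iteration_method(coefficient, constant, [0.5, -0.5, -0.5], iterations=3)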
| 28 |
"""Multi-heuristic A* search on a 20x20 grid with one consistent and two inconsistent heuristics."""

import heapq
import sys

import numpy as np

TPos = tuple[int, int]


class PriorityQueue:
    def __init__(self):
        self.elements = []
        self.set = set()

    def minkey(self):
        if not self.empty():
            return self.elements[0][0]
        else:
            return float("inf")

    def empty(self):
        return len(self.elements) == 0

    def put(self, item, priority):
        if item not in self.set:
            heapq.heappush(self.elements, (priority, item))
            self.set.add(item)
        else:
            # update the priority of an item that is already queued
            temp = []
            (pri, x) = heapq.heappop(self.elements)
            while x != item:
                temp.append((pri, x))
                (pri, x) = heapq.heappop(self.elements)
            temp.append((priority, item))
            for pro, xxx in temp:
                heapq.heappush(self.elements, (pro, xxx))

    def remove_element(self, item):
        if item in self.set:
            self.set.remove(item)
            temp = []
            (pro, x) = heapq.heappop(self.elements)
            while x != item:
                temp.append((pro, x))
                (pro, x) = heapq.heappop(self.elements)
            for prito, yyy in temp:
                heapq.heappush(self.elements, (prito, yyy))

    def top_show(self):
        return self.elements[0][1]

    def get(self):
        (priority, item) = heapq.heappop(self.elements)
        self.set.remove(item)
        return (priority, item)


def consistent_heuristic(p: TPos, goal: TPos):
    # euclidean distance
    a = np.array(p)
    b = np.array(goal)
    return np.linalg.norm(a - b)


def heuristic_1(p: TPos, goal: TPos):
    # integer division by time variable
    return consistent_heuristic(p, goal) // t


def heuristic_2(p: TPos, goal: TPos):
    # manhattan distance
    return abs(p[0] - goal[0]) + abs(p[1] - goal[1])


def key(start: TPos, i: int, goal: TPos, g_function: dict[TPos, float]):
    ans = g_function[start] + W1 * heuristics[i](start, goal)
    return ans


def do_something(back_pointer, goal, start):
    grid = np.chararray((n, n))
    for i in range(n):
        for j in range(n):
            grid[i][j] = "*"

    for i in range(n):
        for j in range(n):
            if (j, (n - 1) - i) in blocks:
                grid[i][j] = "#"

    grid[0][(n - 1)] = "-"
    x = back_pointer[goal]
    while x != start:
        (x_c, y_c) = x
        # print(x)
        grid[(n - 1) - y_c][x_c] = "-"
        x = back_pointer[x]
    grid[(n - 1)][0] = "-"

    for i in range(n):
        for j in range(n):
            if (i, j) == (0, n - 1):
                print(grid[i][j], end=" ")
                print("<-- End position", end=" ")
            else:
                print(grid[i][j], end=" ")
        print()
    print("^")
    print("Start position")
    print()
    print("# is an obstacle")
    print("- is the path taken by algorithm")
    print("PATH TAKEN BY THE ALGORITHM IS:-")
    x = back_pointer[goal]
    while x != start:
        print(x, end=" ")
        x = back_pointer[x]
    print(x)
    sys.exit()


def valid(p: TPos):
    if p[0] < 0 or p[0] > n - 1:
        return False
    if p[1] < 0 or p[1] > n - 1:
        return False
    return True


def expand_state(
    s,
    j,
    visited,
    g_function,
    close_list_anchor,
    close_list_inad,
    open_list,
    back_pointer,
):
    for itera in range(n_heuristic):
        open_list[itera].remove_element(s)
    # print("s", s)
    # print("j", j)
    (x, y) = s
    left = (x - 1, y)
    right = (x + 1, y)
    up = (x, y + 1)
    down = (x, y - 1)

    for neighbours in [left, right, up, down]:
        if neighbours not in blocks:
            if valid(neighbours) and neighbours not in visited:
                # print("neighbour", neighbours)
                visited.add(neighbours)
                back_pointer[neighbours] = -1
                g_function[neighbours] = float("inf")

            if valid(neighbours) and g_function[neighbours] > g_function[s] + 1:
                g_function[neighbours] = g_function[s] + 1
                back_pointer[neighbours] = s
                if neighbours not in close_list_anchor:
                    open_list[0].put(neighbours, key(neighbours, 0, goal, g_function))
                if neighbours not in close_list_inad:
                    for var in range(1, n_heuristic):
                        if key(neighbours, var, goal, g_function) <= W2 * key(
                            neighbours, 0, goal, g_function
                        ):
                            open_list[j].put(
                                neighbours, key(neighbours, var, goal, g_function)
                            )


def make_common_ground():
    some_list = []
    for x in range(1, 5):
        for y in range(1, 6):
            some_list.append((x, y))

    for x in range(15, 20):
        some_list.append((x, 17))

    for x in range(10, 19):
        for y in range(1, 15):
            some_list.append((x, y))

    # L block
    for x in range(1, 4):
        for y in range(12, 19):
            some_list.append((x, y))
    for x in range(3, 13):
        for y in range(16, 19):
            some_list.append((x, y))
    return some_list


heuristics = {0: consistent_heuristic, 1: heuristic_1, 2: heuristic_2}

blocks_blk = [
    (0, 1), (1, 1), (2, 1), (3, 1), (4, 1), (5, 1), (6, 1),
    (7, 1), (8, 1), (9, 1), (10, 1), (11, 1), (12, 1), (13, 1),
    (14, 1), (15, 1), (16, 1), (17, 1), (18, 1), (19, 1),
]

blocks_all = make_common_ground()

blocks = blocks_blk
# hyper parameters
W1 = 1
W2 = 1
n = 20
n_heuristic = 3  # one consistent and two other inconsistent

# start and end destination
start = (0, 0)
goal = (n - 1, n - 1)

t = 1


def multi_a_star(start: TPos, goal: TPos, n_heuristic: int):
    g_function = {start: 0, goal: float("inf")}
    back_pointer = {start: -1, goal: -1}
    open_list = []
    visited = set()

    for i in range(n_heuristic):
        open_list.append(PriorityQueue())
        open_list[i].put(start, key(start, i, goal, g_function))

    close_list_anchor: list[TPos] = []
    close_list_inad: list[TPos] = []
    while open_list[0].minkey() < float("inf"):
        for i in range(1, n_heuristic):
            # print(open_list[0].minkey(), open_list[i].minkey())
            if open_list[i].minkey() <= W2 * open_list[0].minkey():
                global t
                t += 1
                if g_function[goal] <= open_list[i].minkey():
                    if g_function[goal] < float("inf"):
                        do_something(back_pointer, goal, start)
                else:
                    get_s = open_list[i].top_show()
                    visited.add(get_s)
                    expand_state(
                        get_s,
                        i,
                        visited,
                        g_function,
                        close_list_anchor,
                        close_list_inad,
                        open_list,
                        back_pointer,
                    )
                    close_list_inad.append(get_s)
            else:
                if g_function[goal] <= open_list[0].minkey():
                    if g_function[goal] < float("inf"):
                        do_something(back_pointer, goal, start)
                else:
                    get_s = open_list[0].top_show()
                    visited.add(get_s)
                    expand_state(
                        get_s,
                        0,
                        visited,
                        g_function,
                        close_list_anchor,
                        close_list_inad,
                        open_list,
                        back_pointer,
                    )
                    close_list_anchor.append(get_s)
    print("No path found to goal")
    print()
    for i in range(n - 1, -1, -1):
        for j in range(n):
            if (j, i) in blocks:
                print("#", end=" ")
            elif (j, i) in back_pointer:
                if (j, i) == (n - 1, n - 1):
                    print("*", end=" ")
                else:
                    print("-", end=" ")
            else:
                print("*", end=" ")
            if (j, i) == (n - 1, n - 1):
                print("<-- End position", end=" ")
        print()
    print("^")
    print("Start position")
    print()
    print("# is an obstacle")
    print("- is the path taken by algorithm")


if __name__ == "__main__":
    multi_a_star(start, goal, n_heuristic)
| 319 | 0 |
import flax.linen as nn
import jax.numpy as jnp

from .attention_flax import FlaxTransformer2DModel
from .resnet_flax import FlaxDownsample2D, FlaxResnetBlock2D, FlaxUpsample2D


class FlaxCrossAttnDownBlock2D(nn.Module):
    # Cross-attention 2D downsampling block: resnet/transformer pairs with an optional downsample.
    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_downsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
        attentions = []

        for i in range(self.num_layers):
            in_channels = self.in_channels if i == 0 else self.out_channels

            res_block = FlaxResnetBlock2D(
                in_channels=in_channels,
                out_channels=self.out_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)

            attn_block = FlaxTransformer2DModel(
                in_channels=self.out_channels,
                n_heads=self.num_attention_heads,
                d_head=self.out_channels // self.num_attention_heads,
                depth=1,
                use_linear_projection=self.use_linear_projection,
                only_cross_attention=self.only_cross_attention,
                use_memory_efficient_attention=self.use_memory_efficient_attention,
                dtype=self.dtype,
            )
            attentions.append(attn_block)

        self.resnets = resnets
        self.attentions = attentions

        if self.add_downsample:
            self.downsamplers_0 = FlaxDownsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, temb, encoder_hidden_states, deterministic=True):
        output_states = ()

        for resnet, attn in zip(self.resnets, self.attentions):
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)
            output_states += (hidden_states,)

        if self.add_downsample:
            hidden_states = self.downsamplers_0(hidden_states)
            output_states += (hidden_states,)

        return hidden_states, output_states


class FlaxDownBlock2D(nn.Module):
    # Plain 2D downsampling block: resnets only, optional downsample.
    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    add_downsample: bool = True
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []

        for i in range(self.num_layers):
            in_channels = self.in_channels if i == 0 else self.out_channels

            res_block = FlaxResnetBlock2D(
                in_channels=in_channels,
                out_channels=self.out_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)
        self.resnets = resnets

        if self.add_downsample:
            self.downsamplers_0 = FlaxDownsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, temb, deterministic=True):
        output_states = ()

        for resnet in self.resnets:
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            output_states += (hidden_states,)

        if self.add_downsample:
            hidden_states = self.downsamplers_0(hidden_states)
            output_states += (hidden_states,)

        return hidden_states, output_states


class FlaxCrossAttnUpBlock2D(nn.Module):
    # Cross-attention 2D upsampling block consuming skip connections from the down path.
    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_upsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
        attentions = []

        for i in range(self.num_layers):
            res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels

            res_block = FlaxResnetBlock2D(
                in_channels=resnet_in_channels + res_skip_channels,
                out_channels=self.out_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)

            attn_block = FlaxTransformer2DModel(
                in_channels=self.out_channels,
                n_heads=self.num_attention_heads,
                d_head=self.out_channels // self.num_attention_heads,
                depth=1,
                use_linear_projection=self.use_linear_projection,
                only_cross_attention=self.only_cross_attention,
                use_memory_efficient_attention=self.use_memory_efficient_attention,
                dtype=self.dtype,
            )
            attentions.append(attn_block)

        self.resnets = resnets
        self.attentions = attentions

        if self.add_upsample:
            self.upsamplers_0 = FlaxUpsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, res_hidden_states_tuple, temb, encoder_hidden_states, deterministic=True):
        for resnet, attn in zip(self.resnets, self.attentions):
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = jnp.concatenate((hidden_states, res_hidden_states), axis=-1)

            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)

        if self.add_upsample:
            hidden_states = self.upsamplers_0(hidden_states)

        return hidden_states


class FlaxUpBlock2D(nn.Module):
    # Plain 2D upsampling block consuming skip connections from the down path.
    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    add_upsample: bool = True
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []

        for i in range(self.num_layers):
            res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels

            res_block = FlaxResnetBlock2D(
                in_channels=resnet_in_channels + res_skip_channels,
                out_channels=self.out_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)

        self.resnets = resnets

        if self.add_upsample:
            self.upsamplers_0 = FlaxUpsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, res_hidden_states_tuple, temb, deterministic=True):
        for resnet in self.resnets:
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = jnp.concatenate((hidden_states, res_hidden_states), axis=-1)

            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)

        if self.add_upsample:
            hidden_states = self.upsamplers_0(hidden_states)

        return hidden_states


class FlaxUNetMidBlock2DCrossAttn(nn.Module):
    # Mid block: resnet, then alternating transformer/resnet pairs.
    in_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    use_linear_projection: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        # there is always at least one resnet
        resnets = [
            FlaxResnetBlock2D(
                in_channels=self.in_channels,
                out_channels=self.in_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
        ]

        attentions = []

        for _ in range(self.num_layers):
            attn_block = FlaxTransformer2DModel(
                in_channels=self.in_channels,
                n_heads=self.num_attention_heads,
                d_head=self.in_channels // self.num_attention_heads,
                depth=1,
                use_linear_projection=self.use_linear_projection,
                use_memory_efficient_attention=self.use_memory_efficient_attention,
                dtype=self.dtype,
            )
            attentions.append(attn_block)

            res_block = FlaxResnetBlock2D(
                in_channels=self.in_channels,
                out_channels=self.in_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)

        self.resnets = resnets
        self.attentions = attentions

    def __call__(self, hidden_states, temb, encoder_hidden_states, deterministic=True):
        hidden_states = self.resnets[0](hidden_states, temb)
        for attn, resnet in zip(self.attentions, self.resnets[1:]):
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)

        return hidden_states
| 353 |
from __future__ import annotations

from typing import Any


class Graph:
    def __init__(self, num_of_nodes: int) -> None:
        """num_of_nodes - the number of nodes in the graph"""
        self.m_num_of_nodes = num_of_nodes
        self.m_edges: list[list[int]] = []
        self.m_component: dict[int, int] = {}

    def add_edge(self, u_node: int, v_node: int, weight: int) -> None:
        """Adds an edge to the graph."""
        self.m_edges.append([u_node, v_node, weight])

    def find_component(self, u_node: int) -> int:
        """Finds the root of the component a given node belongs to."""
        if self.m_component[u_node] == u_node:
            return u_node
        return self.find_component(self.m_component[u_node])

    def set_component(self, u_node: int) -> None:
        """Propagates the new component roots throughout the component map."""
        if self.m_component[u_node] != u_node:
            for k in self.m_component:
                self.m_component[k] = self.find_component(k)

    def union(self, component_size: list[int], u_node: int, v_node: int) -> None:
        """Attaches the smaller component to the larger one to form a single component."""
        if component_size[u_node] <= component_size[v_node]:
            self.m_component[u_node] = v_node
            component_size[v_node] += component_size[u_node]
            self.set_component(u_node)

        elif component_size[u_node] >= component_size[v_node]:
            self.m_component[v_node] = self.find_component(u_node)
            component_size[u_node] += component_size[v_node]
            self.set_component(v_node)

    def boruvka(self) -> None:
        """Performs Borůvka's algorithm to find the minimum spanning tree."""
        component_size = []
        mst_weight = 0

        minimum_weight_edge: list[Any] = [-1] * self.m_num_of_nodes

        # A list of components (initialized to all of the nodes)
        for node in range(self.m_num_of_nodes):
            self.m_component.update({node: node})
            component_size.append(1)

        num_of_components = self.m_num_of_nodes

        while num_of_components > 1:
            for edge in self.m_edges:
                u, v, w = edge

                u_component = self.m_component[u]
                v_component = self.m_component[v]

                if u_component != v_component:
                    for component in (u_component, v_component):
                        if (
                            minimum_weight_edge[component] == -1
                            or minimum_weight_edge[component][2] > w
                        ):
                            minimum_weight_edge[component] = [u, v, w]

            for edge in minimum_weight_edge:
                if isinstance(edge, list):
                    u, v, w = edge

                    u_component = self.m_component[u]
                    v_component = self.m_component[v]

                    if u_component != v_component:
                        mst_weight += w
                        self.union(component_size, u_component, v_component)
                        print(f"Added edge [{u} - {v}]\nAdded weight: {w}\n")
                        num_of_components -= 1

            minimum_weight_edge = [-1] * self.m_num_of_nodes
        print(f"The total weight of the minimal spanning tree is: {mst_weight}")


def test_vector() -> None:
    """Creates a graph and runs Borůvka's algorithm on it (exercised via doctest)."""


if __name__ == "__main__":
    import doctest

    doctest.testmod()
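# Usage sketch (edge weights chosen for illustration):
# g = Graph(5)
# for u, v, w in [(0, 1, 10), (0, 2, 6), (0, 3, 5), (1, 3, 15), (2, 3, 4)]:
#     g.add_edge(u, v, w)
# g.boruvka()  # prints each added edge and the total MST weight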
| 323 | 0 |
import json
import os
import tempfile

from transformers.testing_utils import check_json_file_has_correct_format


class FeatureExtractionSavingTestMixin:
    feature_extraction_class = None

    def test_feat_extract_to_json_string(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        obj = json.loads(feat_extract.to_json_string())
        for key, value in self.feat_extract_dict.items():
            self.assertEqual(obj[key], value)

    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)

        self.assertEqual(feat_extract_second.to_dict(), feat_extract_first.to_dict())

    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)

        self.assertEqual(feat_extract_second.to_dict(), feat_extract_first.to_dict())

    def test_init_without_params(self):
        feat_extract = self.feature_extraction_class()
        self.assertIsNotNone(feat_extract)
| 32 |
from typing import Dict, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    get_resize_output_image_size,
    normalize,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    IMAGENET_STANDARD_MEAN,
    IMAGENET_STANDARD_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, logging


logger = logging.get_logger(__name__)


# Neutral class name: the concrete model-specific name is not recoverable here. The
# defaults (256 shortest edge, 224x224 crop, ImageNet standard stats) match the common
# transformers resize -> center crop -> rescale -> normalize template.
class ImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size)

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: float,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
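# Usage sketch (ImageProcessor is the stand-in name used above):
# processor = ImageProcessor()
# batch = processor(images=pil_image, return_tensors="pt")
# batch["pixel_values"].shape  # (1, 3, 224, 224) after the 224x224 center crop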
| 32 | 1 |
import json
from typing import List, Optional, Tuple

from tokenizers import normalizers

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mobilebert import MobileBertTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"mobilebert-uncased": "https://huggingface.co/google/mobilebert-uncased/resolve/main/vocab.txt"},
    "tokenizer_file": {
        "mobilebert-uncased": "https://huggingface.co/google/mobilebert-uncased/resolve/main/tokenizer.json"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"mobilebert-uncased": 512}

PRETRAINED_INIT_CONFIGURATION = {}


class MobileBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = MobileBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
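# Usage sketch (downloads the public checkpoint; network access assumed):
# tok = MobileBertTokenizerFast.from_pretrained("google/mobilebert-uncased")
# tok("hello world")["input_ids"]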
| 357 |
from __future__ import annotations

from fractions import Fraction


def is_digit_cancelling(num: int, den: int) -> bool:
    return (
        num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
    )


def fraction_list(digit_len: int) -> list[str]:
    solutions = []
    den = 11
    last_digit = int("1" + "0" * digit_len)
    for num in range(den, last_digit):
        while den <= 99:
            if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
                if is_digit_cancelling(num, den):
                    solutions.append(f"{num}/{den}")
            den += 1
        num += 1
        den = 10
    return solutions


def solution(n: int = 2) -> int:
    result = 1.0
    for fraction in fraction_list(n):
        frac = Fraction(fraction)
        result *= frac.denominator / frac.numerator
    return int(result)


if __name__ == "__main__":
    print(solution())
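# The four non-trivial digit-cancelling fractions are 16/64, 19/95, 26/65 and
# 49/98; their product reduces to 1/100, so solution() returns 100.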
| 177 | 0 |
"""simple docstring"""
import tempfile
import unittest
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from transformers.testing_utils import (
is_torch_available,
require_optimum,
require_torch,
slow,
)
if is_torch_available():
import torch
@require_torch
@require_optimum
@slow
class __UpperCamelCase ( unittest.TestCase ):
def __a ( self ) -> Optional[Any]:
a : Union[str, Any] = "hf-internal-testing/tiny-random-t5"
a : Tuple = AutoTokenizer.from_pretrained(lowerCAmelCase__ )
a : Tuple = AutoModelForSeqaSeqLM.from_pretrained(lowerCAmelCase__ )
a : Any = tokenizer("This is me" , return_tensors="pt" )
a : List[Any] = model.to_bettertransformer()
self.assertTrue(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules() ) )
a : List[str] = model.generate(**lowerCAmelCase__ )
a : str = model.reverse_bettertransformer()
self.assertFalse(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules() ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(lowerCAmelCase__ )
a : int = AutoModelForSeqaSeqLM.from_pretrained(lowerCAmelCase__ )
self.assertFalse(
any("BetterTransformer" in mod.__class__.__name__ for _, mod in model_reloaded.named_modules() ) )
a : List[str] = model_reloaded.generate(**lowerCAmelCase__ )
self.assertTrue(torch.allclose(lowerCAmelCase__ , lowerCAmelCase__ ) )
def __a ( self ) -> Tuple:
a : Union[str, Any] = "hf-internal-testing/tiny-random-t5"
a : Optional[int] = AutoModelForSeqaSeqLM.from_pretrained(lowerCAmelCase__ )
a : Optional[Any] = model.to_bettertransformer()
with tempfile.TemporaryDirectory() as tmpdirname:
with self.assertRaises(lowerCAmelCase__ ):
model.save_pretrained(lowerCAmelCase__ )
a : Optional[int] = model.reverse_bettertransformer()
model.save_pretrained(lowerCAmelCase__ )
| 105 |
import torch

from diffusers import CMStochasticIterativeScheduler

from .test_schedulers import SchedulerCommonTest


class CMStochasticIterativeSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (CMStochasticIterativeScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 201,
            "sigma_min": 0.002,
            "sigma_max": 80.0,
        }

        config.update(**kwargs)
        return config

    def test_step_shape(self):
        num_inference_steps = 10

        scheduler_config = self.get_scheduler_config()
        scheduler = self.scheduler_classes[0](**scheduler_config)

        scheduler.set_timesteps(num_inference_steps)

        timestep_0 = scheduler.timesteps[0]
        timestep_1 = scheduler.timesteps[1]

        sample = self.dummy_sample
        residual = 0.1 * sample

        output_0 = scheduler.step(residual, timestep_0, sample).prev_sample
        output_1 = scheduler.step(residual, timestep_1, sample).prev_sample

        self.assertEqual(output_0.shape, sample.shape)
        self.assertEqual(output_0.shape, output_1.shape)

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_clip_denoised(self):
        for clip_denoised in [True, False]:
            self.check_over_configs(clip_denoised=clip_denoised)

    def test_full_loop_no_noise_onestep(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 1
        scheduler.set_timesteps(num_inference_steps)
        timesteps = scheduler.timesteps

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma

        for i, t in enumerate(timesteps):
            # 1. scale model input
            scaled_sample = scheduler.scale_model_input(sample, t)

            # 2. predict noise residual
            residual = model(scaled_sample, t)

            # 3. predict previous sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 192.7614) < 1e-2
        assert abs(result_mean.item() - 0.2510) < 1e-3

    def test_full_loop_no_noise_multistep(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [106, 0]
        scheduler.set_timesteps(timesteps=timesteps)
        timesteps = scheduler.timesteps

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma

        for t in timesteps:
            # 1. scale model input
            scaled_sample = scheduler.scale_model_input(sample, t)

            # 2. predict noise residual
            residual = model(scaled_sample, t)

            # 3. predict previous sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 347.6357) < 1e-2
        assert abs(result_mean.item() - 0.4527) < 1e-3

    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [39, 30, 12, 15, 0]

        with self.assertRaises(ValueError, msg="`timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)

    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [39, 30, 12, 1, 0]
        num_inference_steps = len(timesteps)

        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)

    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [scheduler.config.num_train_timesteps]

        with self.assertRaises(
            ValueError,
            msg="`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}",
        ):
            scheduler.set_timesteps(timesteps=timesteps)
| 244 | 0 |
from __future__ import annotations

from collections.abc import Sequence
from typing import Literal


def compare_string(string1: str, string2: str) -> str | Literal[False]:
    """Returns the merged implicant if the two strings differ in at most one
    position (the differing bit becomes '_'), and False otherwise."""
    list1 = list(string1)
    list2 = list(string2)
    count = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count += 1
            list1[i] = "_"
    if count > 1:
        return False
    else:
        return "".join(list1)


def check(binary: list[str]) -> list[str]:
    """Repeatedly merges implicants that differ in one bit; implicants that can
    no longer be merged are the prime implicants."""
    pi = []
    while True:
        check1 = ["$"] * len(binary)
        temp = []
        for i in range(len(binary)):
            for j in range(i + 1, len(binary)):
                k = compare_string(binary[i], binary[j])
                if k is not False:
                    # both implicants merged; keep the merged form for the next round
                    check1[i] = "*"
                    check1[j] = "*"
                    temp.append(k)
        for i in range(len(binary)):
            if check1[i] == "$":
                pi.append(binary[i])
        if len(temp) == 0:
            return pi
        binary = list(set(temp))


def decimal_to_binary(no_of_variable: int, minterms: Sequence[int]) -> list[str]:
    temp = []
    for minterm in minterms:
        string = ""
        for _ in range(no_of_variable):
            string = str(minterm % 2) + string
            minterm //= 2
        temp.append(string)
    return temp


def is_for_table(string1: str, string2: str, count: int) -> bool:
    """True if the two strings differ in exactly `count` positions."""
    list1 = list(string1)
    list2 = list(string2)
    count_n = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count_n += 1
    return count_n == count


def selection(chart: list[list[int]], prime_implicants: list[str]) -> list[str]:
    """Picks essential prime implicants from the coverage chart, then covers the
    remaining minterms greedily by largest coverage."""
    temp = []
    select = [0] * len(chart)
    # Columns covered by exactly one implicant mark that implicant as essential.
    for i in range(len(chart[0])):
        count = 0
        rem = -1
        for j in range(len(chart)):
            if chart[j][i] == 1:
                count += 1
                rem = j
        if count == 1:
            select[rem] = 1
    for i in range(len(select)):
        if select[i] == 1:
            for j in range(len(chart[0])):
                if chart[i][j] == 1:
                    for k in range(len(chart)):
                        chart[k][j] = 0
            temp.append(prime_implicants[i])
    while True:
        max_n = 0
        rem = -1
        count_n = 0
        for i in range(len(chart)):
            count_n = chart[i].count(1)
            if count_n > max_n:
                max_n = count_n
                rem = i

        if max_n == 0:
            return temp

        temp.append(prime_implicants[rem])

        for i in range(len(chart[0])):
            if chart[rem][i] == 1:
                for j in range(len(chart)):
                    chart[j][i] = 0


def prime_implicant_chart(prime_implicants: list[str], binary: list[str]) -> list[list[int]]:
    """Builds the coverage chart: chart[i][j] == 1 when prime implicant i covers minterm j."""
    chart = [[0 for _ in range(len(binary))] for _ in range(len(prime_implicants))]
    for i in range(len(prime_implicants)):
        count = prime_implicants[i].count("_")
        for j in range(len(binary)):
            if is_for_table(prime_implicants[i], binary[j], count):
                chart[i][j] = 1

    return chart


def main() -> None:
    no_of_variable = int(input("Enter the no. of variables\n"))
    minterms = [
        int(x)
        for x in input(
            "Enter the decimal representation of Minterms 'Spaces Separated'\n"
        ).split()
    ]
    binary = decimal_to_binary(no_of_variable, minterms)

    prime_implicants = check(binary)
    print("Prime Implicants are:")
    print(prime_implicants)
    chart = prime_implicant_chart(prime_implicants, binary)

    essential_prime_implicants = selection(chart, prime_implicants)
    print("Essential Prime Implicants are:")
    print(essential_prime_implicants)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
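# Non-interactive usage sketch (minterms chosen for illustration):
# binary = decimal_to_binary(3, [0, 1, 2, 5, 6, 7])
# primes = check(binary)
# essentials = selection(prime_implicant_chart(primes, binary), primes)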
| 354 |
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class UpperCamelCase ( unittest.TestCase ):
@property
def _lowercase ( self : Optional[int] ) -> Union[str, Any]:
torch.manual_seed(0 )
_a : List[str] = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("""DownBlock2D""", """AttnDownBlock2D""") , up_block_types=("""AttnUpBlock2D""", """UpBlock2D""") , )
return model
def _lowercase ( self : Dict ) -> Dict:
_a : str = self.dummy_uncond_unet
_a : Optional[int] = KarrasVeScheduler()
_a : List[str] = KarrasVePipeline(unet=UpperCAmelCase__ , scheduler=UpperCAmelCase__ )
pipe.to(UpperCAmelCase__ )
pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
_a : int = torch.manual_seed(0 )
_a : List[Any] = pipe(num_inference_steps=2 , generator=UpperCAmelCase__ , output_type="""numpy""" ).images
_a : Tuple = torch.manual_seed(0 )
_a : int = pipe(num_inference_steps=2 , generator=UpperCAmelCase__ , output_type="""numpy""" , return_dict=UpperCAmelCase__ )[0]
_a : int = image[0, -3:, -3:, -1]
_a : Optional[int] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
_a : str = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch
class UpperCamelCase ( unittest.TestCase ):
def _lowercase ( self : Tuple ) -> List[str]:
_a : Optional[Any] = """google/ncsnpp-celebahq-256"""
_a : Any = UNetaDModel.from_pretrained(UpperCAmelCase__ )
_a : Dict = KarrasVeScheduler()
_a : int = KarrasVePipeline(unet=UpperCAmelCase__ , scheduler=UpperCAmelCase__ )
pipe.to(UpperCAmelCase__ )
pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
_a : Optional[int] = torch.manual_seed(0 )
_a : Tuple = pipe(num_inference_steps=20 , generator=UpperCAmelCase__ , output_type="""numpy""" ).images
_a : List[str] = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
_a : Optional[int] = np.array([0.5_7_8, 0.5_8_1_1, 0.5_9_2_4, 0.5_8_0_9, 0.5_8_7, 0.5_8_8_6, 0.5_8_6_1, 0.5_8_0_2, 0.5_8_6] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 324 | 0 |
from ...processing_utils import ProcessorMixin


class TvltProcessor(ProcessorMixin):
    """Wraps a TVLT image processor and a TVLT feature extractor into a single processor."""

    attributes = ["image_processor", "feature_extractor"]
    image_processor_class = "TvltImageProcessor"
    feature_extractor_class = "TvltFeatureExtractor"

    def __init__(self, image_processor, feature_extractor):
        super().__init__(image_processor=image_processor, feature_extractor=feature_extractor)

        self.image_processor = image_processor
        self.feature_extractor = feature_extractor

    def __call__(
        self,
        images=None,
        audio=None,
        images_mixed=None,
        sampling_rate=None,
        mask_audio=False,
        mask_pixel=False,
        *args,
        **kwargs,
    ):
        if images is None and audio is None:
            raise ValueError("You need to specify either an `images` or `audio` input to process.")

        images_mixed_dict = None
        if images is not None:
            images_dict = self.image_processor(images, mask_pixel=mask_pixel, *args, **kwargs)
        if images_mixed is not None:
            images_mixed_dict = self.image_processor(images_mixed, is_mixed=True, *args, **kwargs)
        if audio is not None:
            audio_dict = self.feature_extractor(
                audio, *args, sampling_rate=sampling_rate, mask_audio=mask_audio, **kwargs
            )

        output_dict = {}
        if audio is not None:
            output_dict.update(audio_dict)
        if images is not None:
            output_dict.update(images_dict)
        if images_mixed_dict is not None:
            output_dict.update(images_mixed_dict)
        return output_dict

    @property
    def model_input_names(self):
        image_processor_input_names = self.image_processor.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names))
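
# Illustrative usage sketch (not from the original module); the checkpoint name
# and array shapes below are assumptions for demonstration only:
#
#   import numpy as np
#   from transformers import TvltProcessor
#   processor = TvltProcessor.from_pretrained("ZinengTang/tvlt-base")
#   images = np.random.rand(8, 3, 224, 224)   # one video clip: 8 RGB frames
#   audio = np.random.rand(16_000)            # ~1 s of mono audio
#   inputs = processor(images=images, audio=audio, sampling_rate=44_100)
#   print(list(inputs.keys()))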
"""simple docstring"""
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ->Any:
lowerCAmelCase = name
lowerCAmelCase = value
lowerCAmelCase = weight
def __repr__( self ) ->str:
return F"{self.__class__.__name__}({self.name}, {self.value}, {self.weight})"
def SCREAMING_SNAKE_CASE_ ( self ) ->List[Any]:
return self.value
def SCREAMING_SNAKE_CASE_ ( self ) ->int:
return self.name
def SCREAMING_SNAKE_CASE_ ( self ) ->Union[str, Any]:
return self.weight
def SCREAMING_SNAKE_CASE_ ( self ) ->Tuple:
return self.value / self.weight
def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ , snake_case__ ) -> int:
lowerCAmelCase = []
for i in range(len(snake_case__ ) ):
menu.append(Things(name[i] , value[i] , weight[i] ) )
return menu
def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ , snake_case__ ) -> Optional[int]:
lowerCAmelCase = sorted(snake_case__ , key=snake_case__ , reverse=snake_case__ )
lowerCAmelCase = []
lowerCAmelCase , lowerCAmelCase = 0.0, 0.0
for i in range(len(snake_case__ ) ):
if (total_cost + items_copy[i].get_weight()) <= max_cost:
result.append(items_copy[i] )
total_cost += items_copy[i].get_weight()
total_value += items_copy[i].get_value()
return (result, total_value)
def SCREAMING_SNAKE_CASE_ ( ) -> Optional[int]:
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
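    # Illustrative run (not from the original file): greedily fill a weight
    # budget of 15 by highest value first, using the helpers defined above.
    menu = build_menu(["burger", "pizza", "salad"], [80.0, 100.0, 30.0], [5.0, 9.0, 3.0])
    chosen, total_value = greedy(menu, 15.0, Things.get_value)
    print(chosen, total_value)  # -> [Things(pizza, 100.0, 9.0), Things(burger, 80.0, 5.0)] 180.0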
import logging
from pathlib import Path

import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only

from utils_rag import save_json


def count_trainable_parameters(model):
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    return params


logger = logging.getLogger(__name__)


def get_checkpoint_callback(output_dir, metric):
    """Saves the best model by validation metric (rouge2, bleu, or em)."""
    if metric == "rouge2":
        exp = "{val_avg_rouge2:.4f}-{step_count}"
    elif metric == "bleu":
        exp = "{val_avg_bleu:.4f}-{step_count}"
    elif metric == "em":
        exp = "{val_avg_em:.4f}-{step_count}"
    else:
        raise NotImplementedError(
            f"seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"
            " function."
        )

    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir,
        filename=exp,
        monitor=f"val_{metric}",
        mode="max",
        save_top_k=3,
        every_n_epochs=1,
    )
    return checkpoint_callback


def get_early_stopping_callback(metric, patience):
    return EarlyStopping(
        monitor=f"val_{metric}",
        mode="min" if "loss" in metric else "max",
        patience=patience,
        verbose=True,
    )


class Seq2SeqLoggingCallback(pl.Callback):
    def on_batch_end(self, trainer, pl_module):
        lrs = {f"lr_group_{i}": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
        pl_module.logger.log_metrics(lrs)

    @rank_zero_only
    def _write_logs(
        self, trainer: pl.Trainer, pl_module: pl.LightningModule, type_path: str, save_generations=True
    ) -> None:
        logger.info(f"***** {type_path} results at step {trainer.global_step:05d} *****")
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]})
        # Log results
        od = Path(pl_module.hparams.output_dir)
        if type_path == "test":
            results_file = od / "test_results.txt"
            generations_file = od / "test_generations.txt"
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / f"{type_path}_results/{trainer.global_step:05d}.txt"
            generations_file = od / f"{type_path}_generations/{trainer.global_step:05d}.txt"
            results_file.parent.mkdir(exist_ok=True)
            generations_file.parent.mkdir(exist_ok=True)
        with open(results_file, "a+") as writer:
            for key in sorted(metrics):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val, torch.Tensor):
                    val = val.item()
                msg = f"{key}: {val:.6f}\n"
                writer.write(msg)

        if not save_generations:
            return

        if "preds" in metrics:
            content = "\n".join(metrics["preds"])
            generations_file.open("w+").write(content)

    @rank_zero_only
    def on_train_start(self, trainer, pl_module):
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()

        n_trainable_pars = count_trainable_parameters(pl_module)
        # mp stands for million parameters
        trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6})

    @rank_zero_only
    def on_test_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        return self._write_logs(trainer, pl_module, "test")

    @rank_zero_only
    def on_validation_end(self, trainer: pl.Trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")
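
# Illustrative wiring sketch (not part of the original file): how these
# callbacks would typically be attached to a Lightning trainer. `MyRagModule`
# is a hypothetical LightningModule stand-in.
#
#   trainer = pl.Trainer(
#       max_epochs=3,
#       callbacks=[
#           Seq2SeqLoggingCallback(),
#           get_checkpoint_callback("outputs", metric="em"),
#           get_early_stopping_callback(metric="em", patience=2),
#       ],
#   )
#   trainer.fit(MyRagModule())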
from __future__ import annotations
import unittest
from transformers import is_tf_available, is_torch_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow
if is_tf_available():
    from transformers import (
        AutoConfig,
        BertConfig,
        GPT2Config,
        T5Config,
        TFAutoModel,
        TFAutoModelForCausalLM,
        TFAutoModelForMaskedLM,
        TFAutoModelForPreTraining,
        TFAutoModelForQuestionAnswering,
        TFAutoModelForSeq2SeqLM,
        TFAutoModelForSequenceClassification,
        TFAutoModelWithLMHead,
        TFBertForMaskedLM,
        TFBertForPreTraining,
        TFBertForQuestionAnswering,
        TFBertForSequenceClassification,
        TFBertModel,
        TFGPT2LMHeadModel,
        TFRobertaForMaskedLM,
        TFT5ForConditionalGeneration,
    )
    from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
    from transformers.models.gpt2.modeling_tf_gpt2 import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
    from transformers.models.t5.modeling_tf_t5 import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
if is_torch_available():
    from transformers import (
        AutoModel,
        AutoModelForCausalLM,
        AutoModelForMaskedLM,
        AutoModelForPreTraining,
        AutoModelForQuestionAnswering,
        AutoModelForSeq2SeqLM,
        AutoModelForSequenceClassification,
        AutoModelWithLMHead,
        BertForMaskedLM,
        BertForPreTraining,
        BertForQuestionAnswering,
        BertForSequenceClassification,
        BertModel,
        GPT2LMHeadModel,
        RobertaForMaskedLM,
        T5ForConditionalGeneration,
    )
@is_pt_tf_cross_test
class TFPTAutoModelTest(unittest.TestCase):
    @slow
    def test_model_from_pretrained(self):
        # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModel.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertModel)

            model = AutoModel.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertModel)
@slow
def lowerCAmelCase ( self : Optional[int]):
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
__lowerCamelCase : str = AutoConfig.from_pretrained(SCREAMING_SNAKE_CASE__)
self.assertIsNotNone(SCREAMING_SNAKE_CASE__)
self.assertIsInstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Tuple = TFAutoModelForPreTraining.from_pretrained(SCREAMING_SNAKE_CASE__ ,from_pt=SCREAMING_SNAKE_CASE__)
self.assertIsNotNone(SCREAMING_SNAKE_CASE__)
self.assertIsInstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
__lowerCamelCase : str = AutoModelForPreTraining.from_pretrained(SCREAMING_SNAKE_CASE__ ,from_tf=SCREAMING_SNAKE_CASE__)
self.assertIsNotNone(SCREAMING_SNAKE_CASE__)
self.assertIsInstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
@slow
def lowerCAmelCase ( self : Optional[int]):
for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCamelCase : Union[str, Any] = AutoConfig.from_pretrained(SCREAMING_SNAKE_CASE__)
self.assertIsNotNone(SCREAMING_SNAKE_CASE__)
self.assertIsInstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Union[str, Any] = TFAutoModelForCausalLM.from_pretrained(SCREAMING_SNAKE_CASE__ ,from_pt=SCREAMING_SNAKE_CASE__)
__lowerCamelCase , __lowerCamelCase : Union[str, Any] = TFAutoModelForCausalLM.from_pretrained(
SCREAMING_SNAKE_CASE__ ,output_loading_info=SCREAMING_SNAKE_CASE__ ,from_pt=SCREAMING_SNAKE_CASE__)
self.assertIsNotNone(SCREAMING_SNAKE_CASE__)
self.assertIsInstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
__lowerCamelCase : List[Any] = AutoModelForCausalLM.from_pretrained(SCREAMING_SNAKE_CASE__ ,from_tf=SCREAMING_SNAKE_CASE__)
__lowerCamelCase , __lowerCamelCase : Any = AutoModelForCausalLM.from_pretrained(
SCREAMING_SNAKE_CASE__ ,output_loading_info=SCREAMING_SNAKE_CASE__ ,from_tf=SCREAMING_SNAKE_CASE__)
self.assertIsNotNone(SCREAMING_SNAKE_CASE__)
self.assertIsInstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
@slow
def lowerCAmelCase ( self : List[Any]):
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCamelCase : List[str] = AutoConfig.from_pretrained(SCREAMING_SNAKE_CASE__)
self.assertIsNotNone(SCREAMING_SNAKE_CASE__)
self.assertIsInstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
__lowerCamelCase : List[str] = TFAutoModelWithLMHead.from_pretrained(SCREAMING_SNAKE_CASE__ ,from_pt=SCREAMING_SNAKE_CASE__)
self.assertIsNotNone(SCREAMING_SNAKE_CASE__)
self.assertIsInstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Optional[Any] = AutoModelWithLMHead.from_pretrained(SCREAMING_SNAKE_CASE__ ,from_tf=SCREAMING_SNAKE_CASE__)
self.assertIsNotNone(SCREAMING_SNAKE_CASE__)
self.assertIsInstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
@slow
def lowerCAmelCase ( self : Tuple):
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCamelCase : Optional[Any] = AutoConfig.from_pretrained(SCREAMING_SNAKE_CASE__)
self.assertIsNotNone(SCREAMING_SNAKE_CASE__)
self.assertIsInstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Optional[int] = TFAutoModelForMaskedLM.from_pretrained(SCREAMING_SNAKE_CASE__ ,from_pt=SCREAMING_SNAKE_CASE__)
__lowerCamelCase , __lowerCamelCase : Dict = TFAutoModelForMaskedLM.from_pretrained(
SCREAMING_SNAKE_CASE__ ,output_loading_info=SCREAMING_SNAKE_CASE__ ,from_pt=SCREAMING_SNAKE_CASE__)
self.assertIsNotNone(SCREAMING_SNAKE_CASE__)
self.assertIsInstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
__lowerCamelCase : List[Any] = AutoModelForMaskedLM.from_pretrained(SCREAMING_SNAKE_CASE__ ,from_tf=SCREAMING_SNAKE_CASE__)
__lowerCamelCase , __lowerCamelCase : Optional[Any] = AutoModelForMaskedLM.from_pretrained(
SCREAMING_SNAKE_CASE__ ,output_loading_info=SCREAMING_SNAKE_CASE__ ,from_tf=SCREAMING_SNAKE_CASE__)
self.assertIsNotNone(SCREAMING_SNAKE_CASE__)
self.assertIsInstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
@slow
def lowerCAmelCase ( self : str):
for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCamelCase : Dict = AutoConfig.from_pretrained(SCREAMING_SNAKE_CASE__)
self.assertIsNotNone(SCREAMING_SNAKE_CASE__)
self.assertIsInstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Optional[Any] = TFAutoModelForSeqaSeqLM.from_pretrained(SCREAMING_SNAKE_CASE__ ,from_pt=SCREAMING_SNAKE_CASE__)
__lowerCamelCase , __lowerCamelCase : List[str] = TFAutoModelForSeqaSeqLM.from_pretrained(
SCREAMING_SNAKE_CASE__ ,output_loading_info=SCREAMING_SNAKE_CASE__ ,from_pt=SCREAMING_SNAKE_CASE__)
self.assertIsNotNone(SCREAMING_SNAKE_CASE__)
self.assertIsInstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Any = AutoModelForSeqaSeqLM.from_pretrained(SCREAMING_SNAKE_CASE__ ,from_tf=SCREAMING_SNAKE_CASE__)
__lowerCamelCase , __lowerCamelCase : List[str] = AutoModelForSeqaSeqLM.from_pretrained(
SCREAMING_SNAKE_CASE__ ,output_loading_info=SCREAMING_SNAKE_CASE__ ,from_tf=SCREAMING_SNAKE_CASE__)
self.assertIsNotNone(SCREAMING_SNAKE_CASE__)
self.assertIsInstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
@slow
def lowerCAmelCase ( self : Optional[Any]):
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
__lowerCamelCase : List[Any] = AutoConfig.from_pretrained(SCREAMING_SNAKE_CASE__)
self.assertIsNotNone(SCREAMING_SNAKE_CASE__)
self.assertIsInstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Optional[int] = TFAutoModelForSequenceClassification.from_pretrained(SCREAMING_SNAKE_CASE__ ,from_pt=SCREAMING_SNAKE_CASE__)
self.assertIsNotNone(SCREAMING_SNAKE_CASE__)
self.assertIsInstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
__lowerCamelCase : int = AutoModelForSequenceClassification.from_pretrained(SCREAMING_SNAKE_CASE__ ,from_tf=SCREAMING_SNAKE_CASE__)
self.assertIsNotNone(SCREAMING_SNAKE_CASE__)
self.assertIsInstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
@slow
def lowerCAmelCase ( self : Any):
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
__lowerCamelCase : List[Any] = AutoConfig.from_pretrained(SCREAMING_SNAKE_CASE__)
self.assertIsNotNone(SCREAMING_SNAKE_CASE__)
self.assertIsInstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
__lowerCamelCase : str = TFAutoModelForQuestionAnswering.from_pretrained(SCREAMING_SNAKE_CASE__ ,from_pt=SCREAMING_SNAKE_CASE__)
self.assertIsNotNone(SCREAMING_SNAKE_CASE__)
self.assertIsInstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Optional[int] = AutoModelForQuestionAnswering.from_pretrained(SCREAMING_SNAKE_CASE__ ,from_tf=SCREAMING_SNAKE_CASE__)
self.assertIsNotNone(SCREAMING_SNAKE_CASE__)
self.assertIsInstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
    def test_from_pretrained_identifier(self):
        model = TFAutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER, from_pt=True)
        self.assertIsInstance(model, TFBertForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)

        model = AutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER, from_tf=True)
        self.assertIsInstance(model, BertForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)

    def test_from_identifier_from_model_type(self):
        model = TFAutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, from_pt=True)
        self.assertIsInstance(model, TFRobertaForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)

        model = AutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, from_tf=True)
        self.assertIsInstance(model, RobertaForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)
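
# Illustrative sketch (not from the original test file) of the PT<->TF weight
# interchange these tests exercise:
#
#   from transformers import AutoModel, TFAutoModel
#   tf_model = TFAutoModel.from_pretrained("bert-base-uncased", from_pt=True)  # PyTorch weights -> TF
#   pt_model = AutoModel.from_pretrained("bert-base-uncased", from_tf=True)    # TF weights -> PyTorch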
import argparse
import logging
import sys
from unittest.mock import patch
import run_glue_deebert
from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()


def get_setup_file():
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f


class DeeBertTests(TestCasePlus):
    def setup(self) -> None:
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)

    def run_and_check(self, args):
        n_gpu = get_gpu_count()

        if n_gpu > 1:
            pass
            # XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560
            # script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py"
            # distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split()
            # cmd = [sys.executable] + distributed_args + args
            # execute_subprocess_async(cmd, env=self.get_env())
            # XXX: test the results - need to save them first into .json file
        else:
            args.insert(0, "run_glue_deebert.py")
            with patch.object(sys, "argv", args):
                result = run_glue_deebert.main()
                for value in result.values():
                    self.assertGreaterEqual(value, 0.666)

    @slow
    @require_torch_non_multi_gpu
    def test_glue_deebert_train(self):
        train_args = "\n --model_type roberta\n --model_name_or_path roberta-base\n --task_name MRPC\n --do_train\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --max_seq_length 128\n --per_gpu_eval_batch_size=1\n --per_gpu_train_batch_size=8\n --learning_rate 2e-4\n --num_train_epochs 3\n --overwrite_output_dir\n --seed 42\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --save_steps 0\n --overwrite_cache\n --eval_after_first_stage\n ".split()
        self.run_and_check(train_args)

        eval_args = "\n --model_type roberta\n --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --task_name MRPC\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --max_seq_length 128\n --eval_each_highway\n --eval_highway\n --overwrite_cache\n --per_gpu_eval_batch_size=1\n ".split()
        self.run_and_check(eval_args)

        entropy_eval_args = "\n --model_type roberta\n --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --task_name MRPC\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --max_seq_length 128\n --early_exit_entropy 0.1\n --eval_highway\n --overwrite_cache\n --per_gpu_eval_batch_size=1\n ".split()
        self.run_and_check(entropy_eval_args)
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__snake_case = logging.get_logger(__name__)
__snake_case = {
"""microsoft/git-base""": """https://huggingface.co/microsoft/git-base/resolve/main/config.json""",
}
class GitVisionConfig(PretrainedConfig):
    """Configuration for the vision encoder of a GIT model."""

    model_type = "git_vision_model"

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3072,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=224,
        patch_size=16,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from GITConfig
        if config_dict.get("model_type") == "git":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class GitConfig(PretrainedConfig):
    """Configuration for the full GIT (GenerativeImage2Text) model."""

    model_type = "git"

    def __init__(
        self,
        vision_config=None,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=6,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1024,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        tie_word_embeddings=False,
        bos_token_id=101,
        eos_token_id=102,
        num_image_with_embedding=None,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, pad_token_id=pad_token_id, **kwargs)

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the GitVisionConfig with default values.")

        self.vision_config = GitVisionConfig(**vision_config)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.num_image_with_embedding = num_image_with_embedding

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
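
# Illustrative usage sketch (not from the original module):
#
#   vision_cfg = GitVisionConfig(image_size=384)
#   config = GitConfig(vision_config=vision_cfg.to_dict(), num_hidden_layers=6)
#   print(config.vision_config.image_size)  # 384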
"""simple docstring"""
import unittest
from transformers import AutoConfig, AutoTokenizer, BertConfig, TensorType, is_flax_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, slow
if is_flax_available():
import jax
from transformers.models.auto.modeling_flax_auto import FlaxAutoModel
from transformers.models.bert.modeling_flax_bert import FlaxBertModel
from transformers.models.roberta.modeling_flax_roberta import FlaxRobertaModel
@require_flax
class lowerCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
@slow
def lowerCamelCase__ ( self : Dict ):
'''simple docstring'''
for model_name in ["bert-base-cased", "bert-large-uncased"]:
with self.subTest(__snake_case ):
__UpperCAmelCase : Union[str, Any] = AutoConfig.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
__UpperCAmelCase : Any = FlaxAutoModel.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
@slow
def lowerCamelCase__ ( self : Dict ):
'''simple docstring'''
for model_name in ["roberta-base", "roberta-large"]:
with self.subTest(__snake_case ):
__UpperCAmelCase : Optional[Any] = AutoConfig.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
__UpperCAmelCase : Optional[int] = FlaxAutoModel.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
@slow
def lowerCamelCase__ ( self : List[Any] ):
'''simple docstring'''
for model_name in ["bert-base-cased", "bert-large-uncased"]:
__UpperCAmelCase : List[Any] = AutoTokenizer.from_pretrained(__snake_case )
__UpperCAmelCase : List[Any] = FlaxBertModel.from_pretrained(__snake_case )
__UpperCAmelCase : Optional[int] = tokenizer("""Do you support jax jitted function?""" , return_tensors=TensorType.JAX )
@jax.jit
def eval(**UpperCamelCase : List[Any] ):
return model(**__snake_case )
eval(**__snake_case ).block_until_ready()
@slow
def lowerCamelCase__ ( self : Optional[int] ):
'''simple docstring'''
for model_name in ["roberta-base", "roberta-large"]:
__UpperCAmelCase : List[Any] = AutoTokenizer.from_pretrained(__snake_case )
__UpperCAmelCase : Tuple = FlaxRobertaModel.from_pretrained(__snake_case )
__UpperCAmelCase : Optional[int] = tokenizer("""Do you support jax jitted function?""" , return_tensors=TensorType.JAX )
@jax.jit
def eval(**UpperCamelCase : List[str] ):
return model(**__snake_case )
eval(**__snake_case ).block_until_ready()
def lowerCamelCase__ ( self : Optional[int] ):
'''simple docstring'''
with self.assertRaisesRegex(
__snake_case , """bert-base is not a local folder and is not a valid model identifier""" ):
__UpperCAmelCase : Optional[int] = FlaxAutoModel.from_pretrained("""bert-base""" )
def lowerCamelCase__ ( self : Optional[Any] ):
'''simple docstring'''
with self.assertRaisesRegex(
__snake_case , R"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ):
__UpperCAmelCase : List[Any] = FlaxAutoModel.from_pretrained(__snake_case , revision="""aaaaaa""" )
def lowerCamelCase__ ( self : Optional[int] ):
'''simple docstring'''
with self.assertRaisesRegex(
__snake_case , """hf-internal-testing/config-no-model does not appear to have a file named flax_model.msgpack""" , ):
__UpperCAmelCase : Optional[int] = FlaxAutoModel.from_pretrained("""hf-internal-testing/config-no-model""" )
def lowerCamelCase__ ( self : List[Any] ):
'''simple docstring'''
with self.assertRaisesRegex(__snake_case , """Use `from_pt=True` to load this model""" ):
__UpperCAmelCase : str = FlaxAutoModel.from_pretrained("""hf-internal-testing/tiny-bert-pt-only""" )
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_bartpho"] = ["BartphoTokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bartpho import BartphoTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
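    # Illustrative note (not part of the original module): with the lazy module
    # installed in sys.modules, `from transformers.models.bartpho import
    # BartphoTokenizer` defers the sentencepiece-dependent import until the
    # attribute is first accessed.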
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
@require_tf
class TFBenchmarkTest(unittest.TestCase):
    def check_results_dict_not_empty(self, results):
        for model_result in results.values():
            for batch_size, sequence_length in zip(model_result["bs"], model_result["ss"]):
                result = model_result["result"][batch_size][sequence_length]
                self.assertIsNotNone(result)

    def test_inference_no_configs_eager(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            eager_mode=True,
            multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)
def lowerCamelCase_ ( self ):
"""simple docstring"""
UpperCamelCase_ = """sgugger/tiny-distilbert-classification"""
UpperCamelCase_ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCamelCase , inference=__UpperCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__UpperCamelCase , only_pretrain_model=__UpperCamelCase , )
UpperCamelCase_ = TensorFlowBenchmark(__UpperCamelCase )
UpperCamelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def lowerCamelCase_ ( self ):
"""simple docstring"""
UpperCamelCase_ = """sshleifer/tiny-gpt2"""
UpperCamelCase_ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCamelCase , inference=__UpperCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__UpperCamelCase , )
UpperCamelCase_ = TensorFlowBenchmark(__UpperCamelCase )
UpperCamelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def lowerCamelCase_ ( self ):
"""simple docstring"""
UpperCamelCase_ = """sshleifer/tiny-gpt2"""
UpperCamelCase_ = AutoConfig.from_pretrained(__UpperCamelCase )
UpperCamelCase_ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCamelCase , inference=__UpperCamelCase , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=__UpperCamelCase , multi_process=__UpperCamelCase , )
UpperCamelCase_ = TensorFlowBenchmark(__UpperCamelCase , [config] )
UpperCamelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def lowerCamelCase_ ( self ):
"""simple docstring"""
UpperCamelCase_ = """sshleifer/tiny-gpt2"""
UpperCamelCase_ = AutoConfig.from_pretrained(__UpperCamelCase )
UpperCamelCase_ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCamelCase , inference=__UpperCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__UpperCamelCase , )
UpperCamelCase_ = TensorFlowBenchmark(__UpperCamelCase , [config] )
UpperCamelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def lowerCamelCase_ ( self ):
"""simple docstring"""
UpperCamelCase_ = """sshleifer/tiny-gpt2"""
UpperCamelCase_ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCamelCase , inference=__UpperCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__UpperCamelCase , )
UpperCamelCase_ = TensorFlowBenchmark(__UpperCamelCase )
UpperCamelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def lowerCamelCase_ ( self ):
"""simple docstring"""
UpperCamelCase_ = """sshleifer/tiny-gpt2"""
UpperCamelCase_ = AutoConfig.from_pretrained(__UpperCamelCase )
UpperCamelCase_ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCamelCase , inference=__UpperCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__UpperCamelCase , )
UpperCamelCase_ = TensorFlowBenchmark(__UpperCamelCase , [config] )
UpperCamelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def lowerCamelCase_ ( self ):
"""simple docstring"""
UpperCamelCase_ = """patrickvonplaten/t5-tiny-random"""
UpperCamelCase_ = AutoConfig.from_pretrained(__UpperCamelCase )
UpperCamelCase_ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCamelCase , inference=__UpperCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__UpperCamelCase , )
UpperCamelCase_ = TensorFlowBenchmark(__UpperCamelCase , configs=[config] )
UpperCamelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
@unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices("""GPU""" ) ) == 0 , """Cannot do xla on CPU.""" )
def lowerCamelCase_ ( self ):
"""simple docstring"""
UpperCamelCase_ = """sshleifer/tiny-gpt2"""
UpperCamelCase_ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCamelCase , inference=__UpperCamelCase , sequence_lengths=[8] , batch_sizes=[1] , use_xla=__UpperCamelCase , multi_process=__UpperCamelCase , )
UpperCamelCase_ = TensorFlowBenchmark(__UpperCamelCase )
UpperCamelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def lowerCamelCase_ ( self ):
"""simple docstring"""
UpperCamelCase_ = """sshleifer/tiny-gpt2"""
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCamelCase_ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , inference=__UpperCamelCase , save_to_csv=__UpperCamelCase , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(__UpperCamelCase , """inf_time.csv""" ) , inference_memory_csv_file=os.path.join(__UpperCamelCase , """inf_mem.csv""" ) , env_info_csv_file=os.path.join(__UpperCamelCase , """env.csv""" ) , multi_process=__UpperCamelCase , )
UpperCamelCase_ = TensorFlowBenchmark(__UpperCamelCase )
benchmark.run()
self.assertTrue(Path(os.path.join(__UpperCamelCase , """inf_time.csv""" ) ).exists() )
self.assertTrue(Path(os.path.join(__UpperCamelCase , """inf_mem.csv""" ) ).exists() )
self.assertTrue(Path(os.path.join(__UpperCamelCase , """env.csv""" ) ).exists() )
def lowerCamelCase_ ( self ):
"""simple docstring"""
UpperCamelCase_ = """sshleifer/tiny-gpt2"""
def _check_summary_is_not_empty(__UpperCamelCase ):
self.assertTrue(hasattr(__UpperCamelCase , """sequential""" ) )
self.assertTrue(hasattr(__UpperCamelCase , """cumulative""" ) )
self.assertTrue(hasattr(__UpperCamelCase , """current""" ) )
self.assertTrue(hasattr(__UpperCamelCase , """total""" ) )
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCamelCase_ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , inference=__UpperCamelCase , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(__UpperCamelCase , """log.txt""" ) , log_print=__UpperCamelCase , trace_memory_line_by_line=__UpperCamelCase , eager_mode=__UpperCamelCase , multi_process=__UpperCamelCase , )
UpperCamelCase_ = TensorFlowBenchmark(__UpperCamelCase )
UpperCamelCase_ = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
self.assertTrue(Path(os.path.join(__UpperCamelCase , """log.txt""" ) ).exists() )
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
    "configuration_mega": ["MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MegaConfig", "MegaOnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mega"] = [
'''MEGA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MegaForCausalLM''',
'''MegaForMaskedLM''',
'''MegaForMultipleChoice''',
'''MegaForQuestionAnswering''',
'''MegaForSequenceClassification''',
'''MegaForTokenClassification''',
'''MegaModel''',
'''MegaPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mega import (
MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
MegaForCausalLM,
MegaForMaskedLM,
MegaForMultipleChoice,
MegaForQuestionAnswering,
MegaForSequenceClassification,
MegaForTokenClassification,
MegaModel,
MegaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def a__ ( *__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=2 ) -> Union[str, Any]:
from .. import __version__
SCREAMING_SNAKE_CASE: int = take_from
SCREAMING_SNAKE_CASE: Any = ()
if not isinstance(args[0] , __SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE: int = (args,)
for attribute, version_name, message in args:
if version.parse(version.parse(__SCREAMING_SNAKE_CASE ).base_version ) >= version.parse(__SCREAMING_SNAKE_CASE ):
raise ValueError(
F"The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'"
F" version {__version__} is >= {version_name}" )
SCREAMING_SNAKE_CASE: Any = None
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) and attribute in deprecated_kwargs:
values += (deprecated_kwargs.pop(__SCREAMING_SNAKE_CASE ),)
SCREAMING_SNAKE_CASE: int = F"The `{attribute}` argument is deprecated and will be removed in version {version_name}."
elif hasattr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
values += (getattr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ),)
SCREAMING_SNAKE_CASE: Dict = F"The `{attribute}` attribute is deprecated and will be removed in version {version_name}."
elif deprecated_kwargs is None:
SCREAMING_SNAKE_CASE: List[str] = F"`{attribute}` is deprecated and will be removed in version {version_name}."
if warning is not None:
SCREAMING_SNAKE_CASE: Optional[int] = warning + " " if standard_warn else ""
warnings.warn(warning + message , __SCREAMING_SNAKE_CASE , stacklevel=__SCREAMING_SNAKE_CASE )
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) and len(__SCREAMING_SNAKE_CASE ) > 0:
SCREAMING_SNAKE_CASE: int = inspect.getouterframes(inspect.currentframe() )[1]
SCREAMING_SNAKE_CASE: Dict = call_frame.filename
SCREAMING_SNAKE_CASE: Tuple = call_frame.lineno
SCREAMING_SNAKE_CASE: Any = call_frame.function
SCREAMING_SNAKE_CASE: Dict = next(iter(deprecated_kwargs.items() ) )
raise TypeError(F"{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`" )
if len(__SCREAMING_SNAKE_CASE ) == 0:
return
elif len(__SCREAMING_SNAKE_CASE ) == 1:
return values[0]
return values
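
# Illustrative usage sketch (not from the original module); `resize` and its
# arguments are hypothetical:
#
#   def resize(image, size=None, **kwargs):
#       width = deprecate("width", "1.0.0", "Use `size` instead.", take_from=kwargs)
#       if size is None and width is not None:
#           size = width
#       ...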
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def replace_key_with_offset(key, offset, original_name, new_name):
    """Replaces the key by subtracting the offset from the original layer number."""
    to_find = original_name.split(".")[0]
    key_list = key.split(".")
    orig_block_num = int(key_list[key_list.index(to_find) - 2])
    layer_num = int(key_list[key_list.index(to_find) - 1])
    new_block_num = orig_block_num - offset

    key = key.replace(f"{orig_block_num}.{layer_num}.{original_name}", f"block.{new_block_num}.{layer_num}.{new_name}")
    return key
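
# Quick illustrative check of the renaming above (example key made up):
#   replace_key_with_offset("poolformer.encoder.2.0.mlp.fc1.weight", 1, "mlp.fc1", "output.conv1")
#   -> "poolformer.encoder.block.1.0.output.conv1.weight"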
def rename_keys(state_dict):
    new_state_dict = OrderedDict()
    total_embed_found, patch_emb_offset = 0, 0
    for key, value in state_dict.items():
        if key.startswith("network"):
            key = key.replace("network", "poolformer.encoder")
        if "proj" in key:
            # Works for the first embedding as well as the internal embedding layers
            if key.endswith("bias") and "patch_embed" not in key:
                patch_emb_offset += 1
            to_replace = key[: key.find("proj")]
            key = key.replace(to_replace, f"patch_embeddings.{total_embed_found}.")
            key = key.replace("proj", "projection")
            if key.endswith("bias"):
                total_embed_found += 1
        if "patch_embeddings" in key:
            key = "poolformer.encoder." + key
        if "mlp.fc1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "mlp.fc1", "output.conv1")
        if "mlp.fc2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "mlp.fc2", "output.conv2")
        if "norm1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "norm1", "before_norm")
        if "norm2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "norm2", "after_norm")
        if "layer_scale_1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "layer_scale_1", "layer_scale_1")
        if "layer_scale_2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "layer_scale_2", "layer_scale_2")
        if "head" in key:
            key = key.replace("head", "classifier")
        new_state_dict[key] = value
    return new_state_dict
def prepare_img():
    """Prepares a COCO image on which the converted model is sanity-checked."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image
@torch.no_grad()
def a__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> Optional[Any]:
__lowerCAmelCase: Any = PoolFormerConfig()
# set attributes based on model_name
__lowerCAmelCase: Any = "huggingface/label-files"
__lowerCAmelCase: int = model_name[-3:]
__lowerCAmelCase: List[Any] = 1_0_0_0
__lowerCAmelCase: Tuple = "imagenet-1k-id2label.json"
__lowerCAmelCase: str = (1, 1_0_0_0)
# set config attributes
__lowerCAmelCase: Dict = json.load(open(hf_hub_download(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , repo_type="dataset" ) , "r" ) )
__lowerCAmelCase: List[str] = {int(__SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()}
__lowerCAmelCase: Any = idalabel
__lowerCAmelCase: Any = {v: k for k, v in idalabel.items()}
if size == "s12":
__lowerCAmelCase: Dict = [2, 2, 6, 2]
__lowerCAmelCase: str = [6_4, 1_2_8, 3_2_0, 5_1_2]
__lowerCAmelCase: Optional[Any] = 4.0
__lowerCAmelCase: Union[str, Any] = 0.9
elif size == "s24":
__lowerCAmelCase: Tuple = [4, 4, 1_2, 4]
__lowerCAmelCase: List[str] = [6_4, 1_2_8, 3_2_0, 5_1_2]
__lowerCAmelCase: Tuple = 4.0
__lowerCAmelCase: Optional[int] = 0.9
elif size == "s36":
__lowerCAmelCase: int = [6, 6, 1_8, 6]
__lowerCAmelCase: int = [6_4, 1_2_8, 3_2_0, 5_1_2]
__lowerCAmelCase: List[str] = 4.0
__lowerCAmelCase: Dict = 1E-6
__lowerCAmelCase: List[Any] = 0.9
elif size == "m36":
__lowerCAmelCase: Dict = [6, 6, 1_8, 6]
__lowerCAmelCase: Dict = [9_6, 1_9_2, 3_8_4, 7_6_8]
__lowerCAmelCase: str = 4.0
__lowerCAmelCase: Union[str, Any] = 1E-6
__lowerCAmelCase: Union[str, Any] = 0.95
elif size == "m48":
__lowerCAmelCase: str = [8, 8, 2_4, 8]
__lowerCAmelCase: Optional[int] = [9_6, 1_9_2, 3_8_4, 7_6_8]
__lowerCAmelCase: str = 4.0
__lowerCAmelCase: int = 1E-6
__lowerCAmelCase: str = 0.95
else:
raise ValueError(F"Size {size} not supported" )
# load image processor
__lowerCAmelCase: Union[str, Any] = PoolFormerImageProcessor(crop_pct=__SCREAMING_SNAKE_CASE )
# Prepare image
__lowerCAmelCase: int = prepare_img()
__lowerCAmelCase: Tuple = image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors="pt" ).pixel_values
logger.info(F"Converting model {model_name}..." )
# load original state dict
__lowerCAmelCase: Optional[int] = torch.load(__SCREAMING_SNAKE_CASE , map_location=torch.device("cpu" ) )
# rename keys
__lowerCAmelCase: Any = rename_keys(__SCREAMING_SNAKE_CASE )
# create HuggingFace model and load state dict
__lowerCAmelCase: str = PoolFormerForImageClassification(__SCREAMING_SNAKE_CASE )
model.load_state_dict(__SCREAMING_SNAKE_CASE )
model.eval()
# Define image processor
__lowerCAmelCase: Any = PoolFormerImageProcessor(crop_pct=__SCREAMING_SNAKE_CASE )
__lowerCAmelCase: Any = image_processor(images=prepare_img() , return_tensors="pt" ).pixel_values
# forward pass
__lowerCAmelCase: int = model(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase: Union[str, Any] = outputs.logits
# define expected logit slices for different models
if size == "s12":
__lowerCAmelCase: List[str] = torch.tensor([-0.3045, -0.6758, -0.4869] )
elif size == "s24":
__lowerCAmelCase: Optional[int] = torch.tensor([0.4402, -0.1374, -0.8045] )
elif size == "s36":
__lowerCAmelCase: List[str] = torch.tensor([-0.6080, -0.5133, -0.5898] )
elif size == "m36":
__lowerCAmelCase: Union[str, Any] = torch.tensor([0.3952, 0.2263, -1.2668] )
elif size == "m48":
__lowerCAmelCase: List[str] = torch.tensor([0.1167, -0.0656, -0.3423] )
else:
raise ValueError(F"Size {size} not supported" )
# verify logits
assert logits.shape == expected_shape
assert torch.allclose(logits[0, :3] , __SCREAMING_SNAKE_CASE , atol=1E-2 )
# finally, save model and image processor
logger.info(F"Saving PyTorch model and image processor to {pytorch_dump_folder_path}..." )
Path(__SCREAMING_SNAKE_CASE ).mkdir(exist_ok=__SCREAMING_SNAKE_CASE )
model.save_pretrained(__SCREAMING_SNAKE_CASE )
print(F"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(__SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--model_name",
default="poolformer_s12",
type=str,
help="Name of the model you'd like to convert.",
)
parser.add_argument(
"--checkpoint_path", default=None, type=str, help="Path to the original PyTorch checkpoint (.pth file)."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
    args = parser.parse_args()
convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
'''simple docstring'''
from typing import Dict, List, Optional
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "nielsr/canine-s": 2048,
}

# Unicode defines 1,114,112 total "codepoints"
UNICODE_VOCAB_SIZE = 1114112

# Below: Constants defining canonical codepoints for special, pseudo-characters.
# Copied from https://github.com/google-research/language/blob/master/language/canine/special_codepoints.py
PAD = 0
CLS = 0xE000
SEP = 0xE001
BOS = 0xE002
MASK = 0xE003
RESERVED = 0xE004
# Maps special codepoints to human-readable names.
SPECIAL_CODEPOINTS: Dict[int, str] = {
    # Special symbols are represented using codepoints values that are valid,
    # but designated as "Private Use", meaning that they will never be assigned
    # characters by the Unicode Consortium, and are thus safe for use here.
    #
    # NOTE: Do *NOT* add any sort of [UNK_CHAR] here. They are explicitly
    # excluded and should fail with a hard error.
    CLS: "[CLS]",
    SEP: "[SEP]",
    BOS: "[BOS]",
    MASK: "[MASK]",
    PAD: "[PAD]",
    RESERVED: "[RESERVED]",
}

# Maps special codepoint human-readable names to their codepoint values.
SPECIAL_CODEPOINTS_BY_NAME: Dict[str, int] = {name: codepoint for codepoint, name in SPECIAL_CODEPOINTS.items()}
class CanineTokenizer(PreTrainedTokenizer):
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        bos_token=chr(CLS),
        eos_token=chr(SEP),
        sep_token=chr(SEP),
        cls_token=chr(CLS),
        pad_token=chr(PAD),
        mask_token=chr(MASK),
        add_prefix_space=False,
        model_max_length=2048,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token,
            pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space,
            model_max_length=model_max_length, **kwargs,
        )

        # Creates a mapping for looking up the IDs of special symbols.
        self._special_codepoints: Dict[str, int] = {}
        for codepoint, name in SPECIAL_CODEPOINTS.items():
            self._special_codepoints[name] = codepoint

        # Creates a mapping for looking up the string forms of special symbol IDs.
        self._special_codepoint_strings: Dict[int, str] = {
            codepoint: name for name, codepoint in self._special_codepoints.items()
        }

        self._unicode_vocab_size = UNICODE_VOCAB_SIZE
        self._num_special_tokens = len(self._special_codepoints)
    @property
    def vocab_size(self) -> int:
        return self._unicode_vocab_size

    def _tokenize(self, text: str) -> List[str]:
        return list(text)

    def _convert_token_to_id(self, token: str) -> int:
        try:
            return ord(token)
        except TypeError:
            raise ValueError(f"invalid token: '{token}'")

    def _convert_id_to_token(self, index: int) -> str:
        try:
            if index in SPECIAL_CODEPOINTS:
                return SPECIAL_CODEPOINTS[index]
            return chr(index)
        except TypeError:
            raise ValueError(f"invalid id: {index}")

    def convert_tokens_to_string(self, tokens):
        return "".join(tokens)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        result = cls + token_ids_0 + sep
        if token_ids_1 is not None:
            result += token_ids_1 + sep
        return result

    def get_special_tokens_mask(
        self, token_ids_0, token_ids_1=None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        result = [1] + ([0] * len(token_ids_0)) + [1]
        if token_ids_1 is not None:
            result += ([0] * len(token_ids_1)) + [1]
        return result

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        result = len(cls + token_ids_0 + sep) * [0]
        if token_ids_1 is not None:
            result += len(token_ids_1 + sep) * [1]
        return result

    # CanineTokenizer has no vocab file
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        return ()
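
# Illustrative usage sketch (not from the original module): CANINE ids are raw
# Unicode code points bracketed by the CLS/SEP pseudo-codepoints.
#
#   tok = CanineTokenizer()
#   print(tok("hi")["input_ids"])  # [57344, 104, 105, 57345] == [CLS] h i [SEP]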
from __future__ import annotations


def median_of_two_arrays(nums1: list[float], nums2: list[float]) -> float:
    """Find the median of the merged contents of two arrays.

    >>> median_of_two_arrays([1, 2], [3])
    2
    >>> median_of_two_arrays([0, -1.1], [2.5, 1])
    0.5
    """
    all_numbers = sorted(nums1 + nums2)
    div, mod = divmod(len(all_numbers), 2)
    if mod == 1:
        return all_numbers[div]
    else:
        return (all_numbers[div] + all_numbers[div - 1]) / 2


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    array_1 = [float(x) for x in input("Enter the elements of first array: ").split()]
    array_2 = [float(x) for x in input("Enter the elements of second array: ").split()]
    print(f"The median of two arrays is: {median_of_two_arrays(array_1, array_2)}")
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
from transformers import BatchEncoding, CanineTokenizer
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.tokenization_utils import AddedToken
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
class CanineTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CanineTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        tokenizer = CanineTokenizer()
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def canine_tokenizer(self):
        return CanineTokenizer.from_pretrained("google/canine-s")

    def get_tokenizer(self, **kwargs) -> CanineTokenizer:
        tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
        tokenizer._unicode_vocab_size = 1024
        return tokenizer
    @require_torch
    def test_prepare_batch_integration(self):
        tokenizer = self.canine_tokenizer
        src_text = ["Life is like a box of chocolates.", "You never know what you're gonna get."]
        # fmt: off
        expected_src_tokens = [57344, 76, 105, 102, 101, 32, 105, 115, 32, 108, 105, 107, 101, 32, 97, 32, 98, 111, 120, 32, 111, 102, 32, 99, 104, 111, 99, 111, 108, 97, 116, 101, 115, 46, 57345, 0, 0, 0, 0]
        # fmt: on
        batch = tokenizer(src_text, padding=True, return_tensors="pt")
        self.assertIsInstance(batch, BatchEncoding)

        result = list(batch.input_ids.numpy()[0])

        self.assertListEqual(expected_src_tokens, result)

        self.assertEqual((2, 39), batch.input_ids.shape)
        self.assertEqual((2, 39), batch.attention_mask.shape)
@require_torch
def lowerCamelCase_ ( self: Optional[Any] ):
lowerCamelCase__ : str = self.canine_tokenizer
lowerCamelCase__ : int = ["""Once there was a man.""", """He wrote a test in HuggingFace Tranformers."""]
lowerCamelCase__ : Union[str, Any] = tokenizer(UpperCamelCase__ , padding=UpperCamelCase__ , return_tensors="""pt""" )
# check if input_ids, attention_mask and token_type_ids are returned
self.assertIn("""input_ids""" , UpperCamelCase__ )
self.assertIn("""attention_mask""" , UpperCamelCase__ )
self.assertIn("""token_type_ids""" , UpperCamelCase__ )
@require_torch
def lowerCamelCase_ ( self: int ):
lowerCamelCase__ : Optional[Any] = self.canine_tokenizer
lowerCamelCase__ : List[str] = [
"""What's the weater?""",
"""It's about 25 degrees.""",
]
lowerCamelCase__ : str = tokenizer(
text_target=UpperCamelCase__ , max_length=32 , padding="""max_length""" , truncation=UpperCamelCase__ , return_tensors="""pt""" )
self.assertEqual(32 , targets["""input_ids"""].shape[1] )
    def test_save_and_load_tokenizer(self):
        # safety check on max_len default value so we are sure the test works
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                self.assertNotEqual(tokenizer.model_max_length, 42)

        # Now let's start the test
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)

                shutil.rmtree(tmpdirname)

        tokenizers = self.get_tokenizers(model_max_length=42)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"

                additional_special_tokens = tokenizer.additional_special_tokens
                # We can add a new special token for Canine as follows:
                new_additional_special_token = chr(0xE007)
                additional_special_tokens.append(new_additional_special_token)
                tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens})
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)
                self.assertIn(new_additional_special_token, after_tokenizer.additional_special_tokens)
                self.assertEqual(after_tokenizer.model_max_length, 42)

                tokenizer = tokenizer.__class__.from_pretrained(tmpdirname, model_max_length=43)
                self.assertEqual(tokenizer.model_max_length, 43)

                shutil.rmtree(tmpdirname)
    def test_add_special_tokens(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                input_text, ids = self.get_clean_sequence(tokenizer)

                # a special token for Canine can be defined as follows:
                SPECIAL_TOKEN = 0xE005
                special_token = chr(SPECIAL_TOKEN)

                tokenizer.add_special_tokens({"cls_token": special_token})
                encoded_special_token = tokenizer.encode(special_token, add_special_tokens=False)
                self.assertEqual(len(encoded_special_token), 1)

                text = tokenizer.decode(ids + encoded_special_token, clean_up_tokenization_spaces=False)
                encoded = tokenizer.encode(text, add_special_tokens=False)

                input_encoded = tokenizer.encode(input_text, add_special_tokens=False)
                special_token_id = tokenizer.encode(special_token, add_special_tokens=False)
                self.assertEqual(encoded, input_encoded + special_token_id)

                decoded = tokenizer.decode(encoded, skip_special_tokens=True)
                self.assertTrue(special_token not in decoded)
    def test_tokenize_special_tokens(self):
        tokenizers = self.get_tokenizers(do_lower_case=True)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                SPECIAL_TOKEN_1 = chr(0xE005)
                SPECIAL_TOKEN_2 = chr(0xE006)

                # `add_tokens` method stores special tokens only in `tokenizer.unique_no_split_tokens`. (in tokenization_utils.py)
                tokenizer.add_tokens([SPECIAL_TOKEN_1], special_tokens=True)
                # `add_special_tokens` method stores special tokens in `tokenizer.additional_special_tokens`,
                # which also occur in `tokenizer.all_special_tokens`. (in tokenization_utils_base.py)
                tokenizer.add_special_tokens({"additional_special_tokens": [SPECIAL_TOKEN_2]})

                token_1 = tokenizer.tokenize(SPECIAL_TOKEN_1)
                token_2 = tokenizer.tokenize(SPECIAL_TOKEN_2)

                self.assertEqual(len(token_1), 1)
                self.assertEqual(len(token_2), 1)
                self.assertEqual(token_1[0], SPECIAL_TOKEN_1)
                self.assertEqual(token_2[0], SPECIAL_TOKEN_2)
@require_tokenizers
    def test_added_token_serializable(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # a special token for Canine can be defined as follows:
                NEW_TOKEN = 0xE006
                new_token = chr(NEW_TOKEN)

                new_token = AddedToken(new_token, lstrip=True)  # lstrip=True is an assumption; the value was scrubbed in the dump
                tokenizer.add_special_tokens({"additional_special_tokens": [new_token]})

                with tempfile.TemporaryDirectory() as tmp_dir_name:
                    tokenizer.save_pretrained(tmp_dir_name)
                    tokenizer.from_pretrained(tmp_dir_name)
    def test_special_tokens_initialization_with_non_empty_additional_special_tokens(self):
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))

        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))

        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), encoding="utf-8") as json_file:
                    special_tokens_map = json.load(json_file)

                with open(os.path.join(tmp_dir, "tokenizer_config.json"), encoding="utf-8") as json_file:
                    tokenizer_config = json.load(json_file)

                # a special token for Canine can be defined as follows:
                NEW_TOKEN = 0xE006
                new_token_1 = chr(NEW_TOKEN)

                special_tokens_map["additional_special_tokens"] = [new_token_1]
                tokenizer_config["additional_special_tokens"] = [new_token_1]

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), "w", encoding="utf-8") as outfile:
                    json.dump(special_tokens_map, outfile)
                with open(os.path.join(tmp_dir, "tokenizer_config.json"), "w", encoding="utf-8") as outfile:
                    json.dump(tokenizer_config, outfile)

                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                tokenizer_without_change_in_init = tokenizer_class.from_pretrained(tmp_dir, extra_ids=0)
                self.assertIn(new_token_1, tokenizer_without_change_in_init.additional_special_tokens)
                # self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
                self.assertEqual(
                    [new_token_1],
                    tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids([new_token_1])
                    ),
                )

                NEW_TOKEN_2 = 0xE007
                new_token_2 = chr(NEW_TOKEN_2)
                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                new_added_tokens = [AddedToken(new_token_2, lstrip=True)]  # lstrip=True is an assumption; the value was scrubbed
                tokenizer = tokenizer_class.from_pretrained(
                    tmp_dir, additional_special_tokens=new_added_tokens, extra_ids=0
                )

                self.assertIn(new_token_2, tokenizer.additional_special_tokens)
                # self.assertIn(new_token_2,tokenizer.get_vocab()) # ByT5Tokenization no vocab
                self.assertEqual(
                    [new_token_2], tokenizer.convert_ids_to_tokens(tokenizer.convert_tokens_to_ids([new_token_2]))
                )
@require_tokenizers
def lowerCamelCase_ ( self: Any ):
        tokenizers = self.get_tokenizers(do_lower_case=False)  # kwarg value was scrubbed; it has no effect for CANINE
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                input = "hello world"
                if self.space_between_special_tokens:
                    output = "[CLS] hello world [SEP]"
                else:
                    output = input
                encoded = tokenizer.encode(input, add_special_tokens=True)
                decoded = tokenizer.decode(encoded, spaces_between_special_tokens=self.space_between_special_tokens)
                self.assertIn(decoded, [output, output.lower()])
def lowerCamelCase_ ( self: Optional[Any] ):
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                attributes_list = [
                    "bos_token",
                    "eos_token",
                    "unk_token",
                    "sep_token",
                    "pad_token",
                    "cls_token",
                    "mask_token",
                ]

                token_to_test_setters = "a"
                token_id_to_test_setters = ord(token_to_test_setters)

                for attr in attributes_list:
                    setattr(tokenizer, attr + "_id", None)
                    self.assertEqual(getattr(tokenizer, attr), None)
                    self.assertEqual(getattr(tokenizer, attr + "_id"), None)

                    setattr(tokenizer, attr + "_id", token_id_to_test_setters)
                    self.assertEqual(getattr(tokenizer, attr), token_to_test_setters)
                    self.assertEqual(getattr(tokenizer, attr + "_id"), token_id_to_test_setters)

                setattr(tokenizer, "additional_special_tokens_ids", [])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens"), [])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens_ids"), [])

                additional_special_token_id = 0xE006
                additional_special_token = chr(additional_special_token_id)
                setattr(tokenizer, "additional_special_tokens_ids", [additional_special_token_id])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens"), [additional_special_token])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens_ids"), [additional_special_token_id])
def lowerCamelCase_ ( self: Union[str, Any] ):
pass
def lowerCamelCase_ ( self: Tuple ):
pass
def lowerCamelCase_ ( self: str ):
pass
def lowerCamelCase_ ( self: Optional[int] ):
pass
def lowerCamelCase_ ( self: int ):
pass
def lowerCamelCase_ ( self: int ):
pass
def lowerCamelCase_ ( self: List[Any] ):
pass
def lowerCamelCase_ ( self: Optional[Any] ):
pass
| 129 |
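# Illustrative sketch (not part of the dataset): the tests above repeatedly
# exercise one pattern — add a special token, save_pretrained, from_pretrained,
# and check the token survives. A standalone version of that round trip,
# assuming network access to the Hugging Face Hub; the checkpoint name is just
# an example.
import tempfile

from transformers import AutoTokenizer


def roundtrip_special_token(checkpoint: str = "bert-base-uncased") -> None:
    tokenizer = AutoTokenizer.from_pretrained(checkpoint)
    new_token = chr(0xE007)  # a private-use codepoint, as in the CANINE tests
    tokenizer.add_special_tokens({"additional_special_tokens": [new_token]})
    with tempfile.TemporaryDirectory() as tmp_dir:
        tokenizer.save_pretrained(tmp_dir)
        reloaded = tokenizer.__class__.from_pretrained(tmp_dir)
    assert new_token in reloaded.additional_special_tokens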
'''simple docstring'''
from collections import deque
class Process:
    def __init__(self, process_name: str, arrival_time: int, burst_time: int) -> None:
        self.process_name = process_name  # process name
        self.arrival_time = arrival_time  # arrival time of the process
        # completion time of finished process or last interrupted time
        self.stop_time = arrival_time
        self.burst_time = burst_time  # remaining burst time
        self.waiting_time = 0  # total time of the process wait in ready queue
        self.turnaround_time = 0  # time from arrival time to completion time
class MLFQ:
    def __init__(
        self,
        number_of_queues: int,
        time_slices: list[int],
        queue: deque[Process],
        current_time: int,
    ) -> None:
        # total number of mlfq's queues
        self.number_of_queues = number_of_queues
        # time slice of queues that round robin algorithm applied
        self.time_slices = time_slices
        # unfinished process is in this ready_queue
        self.ready_queue = queue
        # current time
        self.current_time = current_time
        # finished process is in this sequence queue
        self.finish_queue: deque[Process] = deque()
    def calculate_sequence_of_finish_queue(self) -> list[str]:
        sequence = []
        for i in range(len(self.finish_queue)):
            sequence.append(self.finish_queue[i].process_name)
        return sequence

    def calculate_waiting_time(self, queue: list[Process]) -> list[int]:
        waiting_times = []
        for i in range(len(queue)):
            waiting_times.append(queue[i].waiting_time)
        return waiting_times

    def calculate_turnaround_time(self, queue: list[Process]) -> list[int]:
        turnaround_times = []
        for i in range(len(queue)):
            turnaround_times.append(queue[i].turnaround_time)
        return turnaround_times

    def calculate_completion_time(self, queue: list[Process]) -> list[int]:
        completion_times = []
        for i in range(len(queue)):
            completion_times.append(queue[i].stop_time)
        return completion_times

    def calculate_remaining_burst_time_of_processes(self, queue: deque[Process]) -> list[int]:
        return [q.burst_time for q in queue]

    def update_waiting_time(self, process: Process) -> int:
        process.waiting_time += self.current_time - process.stop_time
        return process.waiting_time
    def first_come_first_served(self, ready_queue: deque[Process]) -> deque[Process]:
        finished: deque[Process] = deque()  # sequence deque of finished process
        while len(ready_queue) != 0:
            cp = ready_queue.popleft()  # current process
            # if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time = cp.arrival_time
            # update waiting time of current process
            self.update_waiting_time(cp)
            # update current time
            self.current_time += cp.burst_time
            # finish the process and set the process's burst-time 0
            cp.burst_time = 0
            # set the process's turnaround time because it is finished
            cp.turnaround_time = self.current_time - cp.arrival_time
            # set the completion time
            cp.stop_time = self.current_time
            # add the process to queue that has finished queue
            finished.append(cp)
        self.finish_queue.extend(finished)  # add finished process to finish queue
        # FCFS will finish all remaining processes
        return finished
    def round_robin(
        self, ready_queue: deque[Process], time_slice: int
    ) -> tuple[deque[Process], deque[Process]]:
        finished: deque[Process] = deque()  # sequence deque of terminated process
        # just for 1 cycle and unfinished processes will go back to queue
        for _ in range(len(ready_queue)):
            cp = ready_queue.popleft()  # current process
            # if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time = cp.arrival_time
            # update waiting time of unfinished processes
            self.update_waiting_time(cp)
            # if the burst time of process is bigger than time-slice
            if cp.burst_time > time_slice:
                # use CPU for only time-slice
                self.current_time += time_slice
                # update remaining burst time
                cp.burst_time -= time_slice
                # update end point time
                cp.stop_time = self.current_time
                # locate the process behind the queue because it is not finished
                ready_queue.append(cp)
            else:
                # use CPU for remaining burst time
                self.current_time += cp.burst_time
                # set burst time 0 because the process is finished
                cp.burst_time = 0
                # set the finish time
                cp.stop_time = self.current_time
                # update the process' turnaround time because it is finished
                cp.turnaround_time = self.current_time - cp.arrival_time
                # add the process to queue that has finished queue
                finished.append(cp)
        self.finish_queue.extend(finished)  # add finished process to finish queue
        # return finished processes queue and remaining processes queue
        return finished, ready_queue
    def multi_level_feedback_queue(self) -> deque[Process]:
        # all queues except last one have round_robin algorithm
        for i in range(self.number_of_queues - 1):
            finished, self.ready_queue = self.round_robin(
                self.ready_queue, self.time_slices[i]
            )
        # the last queue has first_come_first_served algorithm
        self.first_come_first_served(self.ready_queue)
        return self.finish_queue
if __name__ == "__main__":
import doctest
_A : Optional[Any] =Process('''P1''', 0, 53)
_A : List[Any] =Process('''P2''', 0, 17)
_A : Any =Process('''P3''', 0, 68)
_A : Tuple =Process('''P4''', 0, 24)
_A : int =3
_A : Tuple =[17, 25]
_A : List[Any] =deque([Pa, Pa, Pa, Pa])
if len(time_slices) != number_of_queues - 1:
raise SystemExit(0)
doctest.testmod(extraglobs={'''queue''': deque([Pa, Pa, Pa, Pa])})
_A : Dict =Process('''P1''', 0, 53)
_A : Union[str, Any] =Process('''P2''', 0, 17)
_A : int =Process('''P3''', 0, 68)
_A : Dict =Process('''P4''', 0, 24)
_A : List[str] =3
_A : List[Any] =[17, 25]
_A : Any =deque([Pa, Pa, Pa, Pa])
_A : List[str] =MLFQ(number_of_queues, time_slices, queue, 0)
_A : Union[str, Any] =mlfq.multi_level_feedback_queue()
# print total waiting times of processes(P1, P2, P3, P4)
print(
F'waiting time:\
\t\t\t{MLFQ.calculate_waiting_time(mlfq, [Pa, Pa, Pa, Pa])}'
)
# print completion times of processes(P1, P2, P3, P4)
print(
F'completion time:\
\t\t{MLFQ.calculate_completion_time(mlfq, [Pa, Pa, Pa, Pa])}'
)
# print total turnaround times of processes(P1, P2, P3, P4)
print(
F'turnaround time:\
\t\t{MLFQ.calculate_turnaround_time(mlfq, [Pa, Pa, Pa, Pa])}'
)
# print sequence of finished processes
print(
F'sequence of finished processes:\
{mlfq.calculate_sequence_of_finish_queue()}'
)
| 129 | 1 |
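# Illustrative sketch (not part of the dataset): a minimal single-queue
# round-robin scheduler, independent of the MLFQ class above, showing the core
# time-slice mechanic in isolation. Names are illustrative.
from collections import deque


def round_robin_order(burst_times: dict[str, int], time_slice: int) -> list[str]:
    ready = deque(burst_times)  # process names, FIFO
    finished: list[str] = []
    while ready:
        name = ready.popleft()
        if burst_times[name] > time_slice:
            burst_times[name] -= time_slice  # interrupted: back of the queue
            ready.append(name)
        else:
            burst_times[name] = 0  # completes within its slice
            finished.append(name)
    return finished


# With the sample workload above and a 17-unit slice, the completion order is:
assert round_robin_order({"P1": 53, "P2": 17, "P3": 68, "P4": 24}, 17) == ["P2", "P4", "P1", "P3"]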
def find_minimum_change(denominations: list[int], value: str) -> list[int]:
    total_value = int(value)
    # Initialize Result
    answer = []
    # Traverse denominations largest first (assumes the list is sorted ascending)
    for denomination in reversed(denominations):
        # Take as many of this denomination as possible
        while total_value >= int(denomination):
            total_value -= int(denomination)
            answer.append(denomination)  # Append to the "answer" array
    return answer


# Driver Code
if __name__ == "__main__":
    denominations = []
    value = "0"

    if (
        input("Do you want to enter your denominations ? (yY/n): ").strip().lower()
        == "y"
    ):
        n = int(input("Enter the number of denominations you want to add: ").strip())

        for i in range(0, n):
            denominations.append(int(input(f"Denomination {i}: ").strip()))
        value = input("Enter the change you want to make in Indian Currency: ").strip()
    else:
        # All denominations of Indian Currency if user does not enter
        denominations = [1, 2, 5, 10, 20, 50, 100, 500, 2000]
        value = input("Enter the change you want to make: ").strip()

    if int(value) == 0 or int(value) < 0:
        print("The total value cannot be zero or negative.")
    else:
        print(f"Following is minimal change for {value}: ")
        answer = find_minimum_change(denominations, value)
        # Print result
        for i in range(len(answer)):
            print(answer[i], end=" ")
| 32 |
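# Illustrative sketch (not part of the dataset): the greedy above is optimal
# for canonical coin systems such as Indian currency, but not in general. With
# denominations [1, 3, 4] and value 6, greedy returns [4, 1, 1] while [3, 3]
# is optimal. A standard dynamic-programming sketch for the general case;
# names are illustrative.
def min_coins(denominations: list[int], value: int) -> int:
    INF = value + 1
    best = [0] + [INF] * value  # best[v] = fewest coins summing to v
    for v in range(1, value + 1):
        for coin in denominations:
            if coin <= v:
                best[v] = min(best[v], best[v - coin] + 1)
    return best[value] if best[value] <= value else -1  # -1: unreachable


assert min_coins([1, 3, 4], 6) == 2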
"""simple docstring"""
def UpperCAmelCase__ ( _UpperCAmelCase ):
"""simple docstring"""
A_ : List[str] = hex_num.strip()
if not hex_num:
raise ValueError('No value was passed to the function' )
A_ : Any = hex_num[0] == '-'
if is_negative:
A_ : Optional[Any] = hex_num[1:]
try:
A_ : Tuple = int(_UpperCAmelCase , 16 )
except ValueError:
raise ValueError('Invalid value was passed to the function' )
A_ : Union[str, Any] = ''
while int_num > 0:
A_ : Optional[Any] = str(int_num % 2 ) + bin_str
int_num >>= 1
return int(('-' + bin_str) if is_negative else bin_str )
if __name__ == "__main__":
import doctest
doctest.testmod() | 286 | 0 |
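# Illustrative sketch (not part of the dataset): an equivalent using only the
# standard library, for comparison with the digit-building loop above. The
# function name is illustrative.
def hex_to_bin_builtin(hex_num: str) -> int:
    n = int(hex_num.strip(), 16)  # int() already understands a leading '-'
    return int(format(n, "b")) if n >= 0 else -int(format(-n, "b"))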
import requests


def send_slack_message(message_body: str, slack_url: str) -> None:
    headers = {"Content-Type": "application/json"}
    response = requests.post(slack_url, json={"text": message_body}, headers=headers)
    if response.status_code != 200:
        error_message = (
            "Request to slack returned an error "
            f"{response.status_code}, the response is:\n{response.text}"
        )
        raise ValueError(error_message)


if __name__ == "__main__":
    # Set the slack url to the one provided by Slack when you create the webhook at
    # https://my.slack.com/services/new/incoming-webhook/
    send_slack_message("<YOUR MESSAGE BODY>", "<SLACK CHANNEL URL>")
| 308 |
import warnings

from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor


logger = logging.get_logger(__name__)


class MobileViTFeatureExtractor(MobileViTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use MobileViTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 308 | 1 |
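# Illustrative sketch (not part of the dataset): the sample above is an
# instance of a common deprecation-shim pattern — keep the old class name as a
# subclass that warns and delegates. All names here are illustrative.
import warnings


class NewProcessor:
    def __init__(self, size: int = 224) -> None:
        self.size = size


class OldFeatureExtractor(NewProcessor):
    """Kept only for backwards compatibility; warns and delegates to NewProcessor."""

    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "OldFeatureExtractor is deprecated; use NewProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)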
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, Swinv2Config, Swinv2ForImageClassification
def get_swinv2_config(swinv2_name):
    config = Swinv2Config()
    name_split = swinv2_name.split("_")

    model_size = name_split[1]
    if "to" in name_split[3]:
        img_size = int(name_split[3][-3:])
    else:
        img_size = int(name_split[3])
    if "to" in name_split[2]:
        window_size = int(name_split[2][-2:])
    else:
        window_size = int(name_split[2][6:])

    if model_size == "tiny":
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "small":
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "base":
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    else:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)

    if "to" in swinv2_name:
        config.pretrained_window_sizes = (12, 12, 12, 6)

    if ("22k" in swinv2_name) and ("to" not in swinv2_name):
        num_classes = 21_841
        repo_id = "huggingface/label-files"
        filename = "imagenet-22k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    else:
        num_classes = 1_000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    config.image_size = img_size
    config.num_labels = num_classes
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads
    config.window_size = window_size

    return config
def rename_key(name):
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "q_bias" in name:
        name = name.replace("q_bias", "query.bias")
    if "k_bias" in name:
        name = name.replace("k_bias", "key.bias")
    if "v_bias" in name:
        name = name.replace("v_bias", "value.bias")
    if "cpb_mlp" in name:
        name = name.replace("cpb_mlp", "continuous_position_bias_mlp")
    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"
    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "swinv2." + name
    return name
def convert_state_dict(orig_state_dict, model):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "mask" in key:
            continue
        elif "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[1])
            block_num = int(key_split[3])
            dim = model.swinv2.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size

            # target keys below follow the standard Swin -> HF layout; the exact
            # strings were scrubbed in the dump and are restored here
            if "weight" in key:
                orig_state_dict[
                    f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"
                ] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def convert_swinv2_checkpoint(swinv2_name, pytorch_dump_folder_path):
    timm_model = timm.create_model(swinv2_name, pretrained=True)
    timm_model.eval()

    config = get_swinv2_config(swinv2_name)
    model = Swinv2ForImageClassification(config)
    model.eval()

    new_state_dict = convert_state_dict(timm_model.state_dict(), model)
    model.load_state_dict(new_state_dict)

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image_processor = AutoImageProcessor.from_pretrained("microsoft/{}".format(swinv2_name.replace("_", "-")))
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors="pt")

    timm_outs = timm_model(inputs["pixel_values"])
    hf_outs = model(**inputs).logits

    assert torch.allclose(timm_outs, hf_outs, atol=1e-3)

    print(f"Saving model {swinv2_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    model.push_to_hub(
        repo_path_or_name=Path(pytorch_dump_folder_path, swinv2_name),
        organization="nandwalritik",
        commit_message="Add model",
    )
if __name__ == "__main__":
lowercase : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--swinv2_name",
default="swinv2_tiny_patch4_window8_256",
type=str,
help="Name of the Swinv2 timm model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
lowercase : Dict = parser.parse_args()
convert_swinva_checkpoint(args.swinva_name, args.pytorch_dump_folder_path)
| 42 |
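# Illustrative sketch (not part of the dataset): a self-contained version of
# the pattern the conversion script uses — walk a state dict, rewrite keys,
# and split a fused qkv tensor into separate query/key/value entries. All
# names are illustrative.
import torch


def split_qkv(state_dict: dict, dim: int) -> dict:
    out = {}
    for key, val in state_dict.items():
        if "qkv" in key:
            prefix = key.replace("qkv.weight", "")
            out[prefix + "query.weight"] = val[:dim, :]
            out[prefix + "key.weight"] = val[dim : dim * 2, :]
            out[prefix + "value.weight"] = val[-dim:, :]
        else:
            out[key] = val
    return out


fused = {"block0.attn.qkv.weight": torch.randn(3 * 8, 8)}
separate = split_qkv(fused, dim=8)
assert separate["block0.attn.query.weight"].shape == (8, 8)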
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class UnCLIPSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (UnCLIPScheduler,)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "variance_type": "fixed_small_log",
            "clip_sample": True,
            "clip_sample_range": 1.0,
            "prediction_type": "epsilon",
        }

        config.update(**kwargs)
        return config
    def test_timesteps(self):
"""simple docstring"""
for timesteps in [1, 5, 100, 1000]:
self.check_over_configs(num_train_timesteps=UpperCamelCase__ )
    def test_variance_type(self):
"""simple docstring"""
for variance in ["fixed_small_log", "learned_range"]:
self.check_over_configs(variance_type=UpperCamelCase__ )
    def test_clip_sample(self):
"""simple docstring"""
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=UpperCamelCase__ )
    def test_clip_sample_range(self):
"""simple docstring"""
for clip_sample_range in [1, 5, 10, 20]:
self.check_over_configs(clip_sample_range=UpperCamelCase__ )
    def test_prediction_type(self):
"""simple docstring"""
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(prediction_type=UpperCamelCase__ )
    def test_time_indices(self):
"""simple docstring"""
for time_step in [0, 500, 999]:
for prev_timestep in [None, 5, 100, 250, 500, 750]:
if prev_timestep is not None and prev_timestep >= time_step:
continue
self.check_over_forward(time_step=UpperCamelCase__, prev_timestep=UpperCamelCase__ )
    def test_variance_fixed_small_log(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type="fixed_small_log")
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0) - 1.0000e-10)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.0_549_625)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.9_994_987)) < 1e-5
    def test_variance_learned_range(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type="learned_range")
        scheduler = scheduler_class(**scheduler_config)

        predicted_variance = 0.5

        assert scheduler._get_variance(1, predicted_variance=predicted_variance) - -10.1_712_790 < 1e-5
        assert scheduler._get_variance(487, predicted_variance=predicted_variance) - -5.7_998_052 < 1e-5
        assert scheduler._get_variance(999, predicted_variance=predicted_variance) - -0.0_010_011 < 1e-5
    def test_full_loop(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = scheduler.timesteps

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for i, t in enumerate(timesteps):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 252.2_682_495) < 1e-2
        assert abs(result_mean.item() - 0.3_284_743) < 1e-3
    def test_full_loop_skip_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(25)

        timesteps = scheduler.timesteps

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for i, t in enumerate(timesteps):
            # 1. predict noise residual
            residual = model(sample, t)

            if i + 1 == timesteps.shape[0]:
                prev_timestep = None
            else:
                prev_timestep = timesteps[i + 1]

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(
                residual, t, sample, prev_timestep=prev_timestep, generator=generator
            ).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.2_044_983) < 1e-2
        assert abs(result_mean.item() - 0.3_362_038) < 1e-3
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
pass
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
pass
| 278 | 0 |
def count_inversions_bf(arr):
    num_inversions = 0
    n = len(arr)
    for i in range(n - 1):
        for j in range(i + 1, n):
            if arr[i] > arr[j]:
                num_inversions += 1
    return num_inversions


def count_inversions_recursive(arr):
    if len(arr) <= 1:
        return arr, 0
    mid = len(arr) // 2
    p = arr[0:mid]
    q = arr[mid:]
    a, inversion_p = count_inversions_recursive(p)
    b, inversions_q = count_inversions_recursive(q)
    c, cross_inversions = _count_cross_inversions(a, b)
    num_inversions = inversion_p + inversions_q + cross_inversions
    return c, num_inversions


def _count_cross_inversions(p, q):
    r = []
    i = j = num_inversion = 0
    while i < len(p) and j < len(q):
        if p[i] > q[j]:
            # if P[i] > Q[j], then P[k] > Q[j] for all i < k <= len(P)
            # These are all inversions. The claim emerges from the
            # property that P is sorted.
            num_inversion += len(p) - i
            r.append(q[j])
            j += 1
        else:
            r.append(p[i])
            i += 1
    if i < len(p):
        r.extend(p[i:])
    else:
        r.extend(q[j:])
    return r, num_inversion


def main():
    arr_1 = [10, 2, 1, 5, 5, 2, 11]
    # this arr has 8 inversions:
    # (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 8
    print("number of inversions = ", num_inversions_bf)
    # testing an array with zero inversion (a sorted arr_1)
    arr_1.sort()
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 0
    print("number of inversions = ", num_inversions_bf)
    # an empty list should also have zero inversions
    arr_1 = []
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 0
    print("number of inversions = ", num_inversions_bf)


if __name__ == "__main__":
    main()
| 352 |
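# Illustrative sketch (not part of the dataset): a quick property check for
# the two counters above, assuming the fixed definitions of
# count_inversions_bf and count_inversions_recursive — the O(n log n)
# merge-based count must agree with the O(n^2) brute force on random inputs.
import random


def check_inversion_counters_agree(trials: int = 100) -> None:
    for _ in range(trials):
        arr = [random.randint(0, 50) for _ in range(random.randint(0, 30))]
        _, fast = count_inversions_recursive(arr)
        assert fast == count_inversions_bf(arr)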
"""simple docstring"""
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def compare_string(string1: str, string2: str) -> str | Literal[False]:
    list1 = list(string1)
    list2 = list(string2)
    count = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count += 1
            list1[i] = "_"
    if count > 1:
        return False
    else:
        return "".join(list1)
def check(binary: list[str]) -> list[str]:
    pi = []
    while True:
        check1 = ["$"] * len(binary)
        temp = []
        for i in range(len(binary)):
            for j in range(i + 1, len(binary)):
                k = compare_string(binary[i], binary[j])
                if k is not False:
                    # the two terms combine: mark both as covered and keep the
                    # merged term for the next round
                    check1[i] = "*"
                    check1[j] = "*"
                    temp.append(k)
        for i in range(len(binary)):
            if check1[i] == "$":
                pi.append(binary[i])
        if len(temp) == 0:
            return pi
        binary = list(set(temp))
def decimal_to_binary(no_of_variable: int, minterms: Sequence[float]) -> list[str]:
    temp = []
    for minterm in minterms:
        string = ""
        for _ in range(no_of_variable):
            string = str(minterm % 2) + string
            minterm //= 2
        temp.append(string)
    return temp
def is_for_table(string1: str, string2: str, count: int) -> bool:
    list1 = list(string1)
    list2 = list(string2)
    count_n = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count_n += 1
    return count_n == count
def selection(chart: list[list[int]], prime_implicants: list[str]) -> list[str]:
    temp = []
    select = [0] * len(chart)
    for i in range(len(chart[0])):
        count = 0
        rem = -1
        for j in range(len(chart)):
            if chart[j][i] == 1:
                count += 1
                rem = j
        if count == 1:
            select[rem] = 1
    for i in range(len(chart)):
        if select[i] == 1:
            for j in range(len(chart[0])):
                if chart[i][j] == 1:
                    for k in range(len(chart)):
                        chart[k][j] = 0
            temp.append(prime_implicants[i])
    while True:
        max_n = 0
        rem = -1
        count_n = 0
        for i in range(len(chart)):
            count_n = chart[i].count(1)
            if count_n > max_n:
                max_n = count_n
                rem = i
        if max_n == 0:
            return temp
        temp.append(prime_implicants[rem])
        for i in range(len(chart[0])):
            if chart[rem][i] == 1:
                for j in range(len(chart)):
                    chart[j][i] = 0
def prime_implicant_chart(prime_implicants: list[str], binary: list[str]) -> list[list[int]]:
    chart = [[0 for x in range(len(binary))] for x in range(len(prime_implicants))]
    for i in range(len(prime_implicants)):
        count = prime_implicants[i].count("_")
        for j in range(len(binary)):
            if is_for_table(prime_implicants[i], binary[j], count):
                chart[i][j] = 1
    return chart
def main() -> None:
    no_of_variable = int(input("Enter the no. of variables\n"))
    minterms = [
        float(x)
        for x in input(
            "Enter the decimal representation of Minterms 'Spaces Separated'\n"
        ).split()
    ]
    binary = decimal_to_binary(no_of_variable, minterms)

    prime_implicants = check(binary)
    print("Prime Implicants are:")
    print(prime_implicants)
    chart = prime_implicant_chart(prime_implicants, binary)

    essential_prime_implicants = selection(chart, prime_implicants)
    print("Essential Prime Implicants are:")
    print(essential_prime_implicants)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
| 172 | 0 |
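# Illustrative sketch (not part of the dataset): end-to-end usage of the
# Quine-McCluskey pipeline above. Integer minterms avoid the float artifacts
# the interactive driver allows; the variable names are illustrative.
binary = decimal_to_binary(3, [0, 1, 2, 5, 6, 7])  # 3-variable minterms
prime_implicants = check(binary)
chart = prime_implicant_chart(prime_implicants, binary)
essential = selection(chart, prime_implicants)
print(essential)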