code stringlengths 81 54k | code_codestyle int64 0 721 | style_context stringlengths 91 41.9k | style_context_codestyle int64 0 699 | label int64 0 1 |
|---|---|---|---|---|
"""simple docstring"""
from __future__ import annotations
from collections import namedtuple
def a__ ( a : float , a : float , a : float ):
"""simple docstring"""
_snake_case : Optional[Any] = namedtuple("result" , "name value" )
if (voltage, current, power).count(0 ) != 1:
raise ValueError("Only one argument must be 0" )
elif power < 0:
raise ValueError(
"Power cannot be negative in any electrical/electronics system" )
elif voltage == 0:
return result("voltage" , power / current )
elif current == 0:
return result("current" , power / voltage )
elif power == 0:
return result("power" , float(round(abs(voltage * current ) , 2 ) ) )
else:
raise ValueError("Exactly one argument must be 0" )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 87 |
"""simple docstring"""
import argparse
import json
import subprocess
def a__ ( a : Optional[Any] , a : Optional[int] ):
"""simple docstring"""
_snake_case : str = []
_snake_case : Optional[Any] = (
f'curl -H "Accept: application/vnd.github+json" -H "Authorization: Bearer {token}"'
" https://api.github.com/repos/huggingface/transformers/actions/runners"
)
_snake_case : Dict = subprocess.run(a , shell=a , stdout=subprocess.PIPE )
_snake_case : Tuple = output.stdout.decode("utf-8" )
_snake_case : List[str] = json.loads(a )
_snake_case : Any = status["runners"]
for runner in runners:
if runner["name"] in target_runners:
if runner["status"] == "offline":
offline_runners.append(a )
# save the result so we can report them on Slack
with open("offline_runners.txt" , "w" ) as fp:
fp.write(json.dumps(a ) )
if len(a ) > 0:
_snake_case : Any = "\n".join([x["name"] for x in offline_runners] )
raise ValueError(f'The following runners are offline:\n{failed}' )
if __name__ == "__main__":
def a__ ( a : Optional[int] ):
"""simple docstring"""
return values.split("," )
_a : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--target_runners""",
default=None,
type=list_str,
required=True,
help="""Comma-separated list of runners to check status.""",
)
parser.add_argument(
"""--token""", default=None, type=str, required=True, help="""A token that has actions:read permission."""
)
_a : List[str] = parser.parse_args()
get_runner_status(args.target_runners, args.token)
| 87 | 1 |
"""simple docstring"""
import argparse
import torch
from transformers import OpenAIGPTConfig, OpenAIGPTModel, load_tf_weights_in_openai_gpt
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def a__ ( a : List[Any] , a : List[Any] , a : Tuple ):
"""simple docstring"""
if openai_config_file == "":
_snake_case : Union[str, Any] = OpenAIGPTConfig()
else:
_snake_case : Tuple = OpenAIGPTConfig.from_json_file(a )
_snake_case : Optional[int] = OpenAIGPTModel(a )
# Load weights from numpy
load_tf_weights_in_openai_gpt(a , a , a )
# Save pytorch-model
_snake_case : List[Any] = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
_snake_case : Union[str, Any] = pytorch_dump_folder_path + "/" + CONFIG_NAME
print(f'Save PyTorch model to {pytorch_weights_dump_path}' )
torch.save(model.state_dict() , a )
print(f'Save configuration file to {pytorch_config_dump_path}' )
with open(a , "w" , encoding="utf-8" ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
_a : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--openai_checkpoint_folder_path""",
default=None,
type=str,
required=True,
help="""Path to the TensorFlow checkpoint path.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--openai_config_file""",
default="""""",
type=str,
help=(
"""An optional config json file corresponding to the pre-trained OpenAI model. \n"""
"""This specifies the model architecture."""
),
)
_a : Optional[Any] = parser.parse_args()
convert_openai_checkpoint_to_pytorch(
args.openai_checkpoint_folder_path, args.openai_config_file, args.pytorch_dump_folder_path
)
| 87 |
"""simple docstring"""
import unittest
from .lib import (
Matrix,
Vector,
axpy,
square_zero_matrix,
unit_basis_vector,
zero_vector,
)
class _UpperCAmelCase ( unittest.TestCase):
def lowerCamelCase__ ( self ):
_snake_case : List[Any] = Vector([1, 2, 3] )
self.assertEqual(x.component(0 ) , 1 )
self.assertEqual(x.component(2 ) , 3 )
_snake_case : List[Any] = Vector()
def lowerCamelCase__ ( self ):
_snake_case : Any = Vector([0, 0, 0, 0, 0, 1] )
self.assertEqual(str(snake_case_ ) , "(0,0,0,0,0,1)" )
def lowerCamelCase__ ( self ):
_snake_case : Dict = Vector([1, 2, 3, 4] )
self.assertEqual(len(snake_case_ ) , 4 )
def lowerCamelCase__ ( self ):
_snake_case : List[Any] = Vector([1, 2] )
_snake_case : List[str] = Vector([1, 2, 3, 4, 5] )
_snake_case : List[Any] = Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0] )
_snake_case : Any = Vector([1, -1, 1, -1, 2, -3, 4, -5] )
self.assertAlmostEqual(x.euclidean_length() , 2.236 , 3 )
self.assertAlmostEqual(y.euclidean_length() , 7.416 , 3 )
self.assertEqual(z.euclidean_length() , 0 )
self.assertAlmostEqual(w.euclidean_length() , 7.616 , 3 )
def lowerCamelCase__ ( self ):
_snake_case : List[Any] = Vector([1, 2, 3] )
_snake_case : Any = Vector([1, 1, 1] )
self.assertEqual((x + y).component(0 ) , 2 )
self.assertEqual((x + y).component(1 ) , 3 )
self.assertEqual((x + y).component(2 ) , 4 )
def lowerCamelCase__ ( self ):
_snake_case : str = Vector([1, 2, 3] )
_snake_case : Union[str, Any] = Vector([1, 1, 1] )
self.assertEqual((x - y).component(0 ) , 0 )
self.assertEqual((x - y).component(1 ) , 1 )
self.assertEqual((x - y).component(2 ) , 2 )
def lowerCamelCase__ ( self ):
_snake_case : Optional[int] = Vector([1, 2, 3] )
_snake_case : List[Any] = Vector([2, -1, 4] ) # for test of dot product
_snake_case : Union[str, Any] = Vector([1, -2, -1] )
self.assertEqual(str(x * 3.0 ) , "(3.0,6.0,9.0)" )
self.assertEqual((a * b) , 0 )
def lowerCamelCase__ ( self ):
self.assertEqual(str(zero_vector(10 ) ).count("0" ) , 10 )
def lowerCamelCase__ ( self ):
self.assertEqual(str(unit_basis_vector(3 , 1 ) ) , "(0,1,0)" )
def lowerCamelCase__ ( self ):
_snake_case : Tuple = Vector([1, 2, 3] )
_snake_case : Optional[Any] = Vector([1, 0, 1] )
self.assertEqual(str(axpy(2 , snake_case_ , snake_case_ ) ) , "(3,4,7)" )
def lowerCamelCase__ ( self ):
_snake_case : Union[str, Any] = Vector([1, 0, 0, 0, 0, 0] )
_snake_case : Optional[int] = x.copy()
self.assertEqual(str(snake_case_ ) , str(snake_case_ ) )
def lowerCamelCase__ ( self ):
_snake_case : Dict = Vector([1, 0, 0] )
x.change_component(0 , 0 )
x.change_component(1 , 1 )
self.assertEqual(str(snake_case_ ) , "(0,1,0)" )
def lowerCamelCase__ ( self ):
_snake_case : str = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
self.assertEqual("|1,2,3|\n|2,4,5|\n|6,7,8|\n" , str(snake_case_ ) )
def lowerCamelCase__ ( self ):
_snake_case : Dict = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
_snake_case : str = [[-3, -14, -10], [-5, -10, -5], [-2, -1, 0]]
for x in range(a.height() ):
for y in range(a.width() ):
self.assertEqual(minors[x][y] , a.minor(snake_case_ , snake_case_ ) )
def lowerCamelCase__ ( self ):
_snake_case : Union[str, Any] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
_snake_case : Optional[Any] = [[-3, 14, -10], [5, -10, 5], [-2, 1, 0]]
for x in range(a.height() ):
for y in range(a.width() ):
self.assertEqual(cofactors[x][y] , a.cofactor(snake_case_ , snake_case_ ) )
def lowerCamelCase__ ( self ):
_snake_case : int = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
self.assertEqual(-5 , a.determinant() )
def lowerCamelCase__ ( self ):
_snake_case : str = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]] , 3 , 3 )
_snake_case : List[str] = Vector([1, 2, 3] )
self.assertEqual("(14,32,50)" , str(a * x ) )
self.assertEqual("|2,4,6|\n|8,10,12|\n|14,16,18|\n" , str(a * 2 ) )
def lowerCamelCase__ ( self ):
_snake_case : Optional[int] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
a.change_component(0 , 2 , 5 )
self.assertEqual("|1,2,5|\n|2,4,5|\n|6,7,8|\n" , str(snake_case_ ) )
def lowerCamelCase__ ( self ):
_snake_case : List[Any] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
self.assertEqual(7 , a.component(2 , 1 ) , 0.01 )
def lowerCamelCase__ ( self ):
_snake_case : Tuple = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
_snake_case : int = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 )
self.assertEqual("|2,4,10|\n|4,8,10|\n|12,14,18|\n" , str(a + b ) )
def lowerCamelCase__ ( self ):
_snake_case : int = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
_snake_case : Optional[Any] = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 )
self.assertEqual("|0,0,-4|\n|0,0,0|\n|0,0,-2|\n" , str(a - b ) )
def lowerCamelCase__ ( self ):
self.assertEqual(
"|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n" , str(square_zero_matrix(5 ) ) , )
if __name__ == "__main__":
unittest.main()
| 87 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_a : List[Any] = {"""configuration_xlnet""": ["""XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """XLNetConfig"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : List[str] = ["""XLNetTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : Optional[Any] = ["""XLNetTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : Optional[int] = [
"""XLNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XLNetForMultipleChoice""",
"""XLNetForQuestionAnswering""",
"""XLNetForQuestionAnsweringSimple""",
"""XLNetForSequenceClassification""",
"""XLNetForTokenClassification""",
"""XLNetLMHeadModel""",
"""XLNetModel""",
"""XLNetPreTrainedModel""",
"""load_tf_weights_in_xlnet""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : Union[str, Any] = [
"""TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFXLNetForMultipleChoice""",
"""TFXLNetForQuestionAnsweringSimple""",
"""TFXLNetForSequenceClassification""",
"""TFXLNetForTokenClassification""",
"""TFXLNetLMHeadModel""",
"""TFXLNetMainLayer""",
"""TFXLNetModel""",
"""TFXLNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet import XLNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet_fast import XLNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlnet import (
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
XLNetForMultipleChoice,
XLNetForQuestionAnswering,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
XLNetPreTrainedModel,
load_tf_weights_in_xlnet,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlnet import (
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLNetForMultipleChoice,
TFXLNetForQuestionAnsweringSimple,
TFXLNetForSequenceClassification,
TFXLNetForTokenClassification,
TFXLNetLMHeadModel,
TFXLNetMainLayer,
TFXLNetModel,
TFXLNetPreTrainedModel,
)
else:
import sys
_a : Union[str, Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 87 |
"""simple docstring"""
from __future__ import annotations
from collections import namedtuple
def a__ ( a : float , a : float , a : float ):
"""simple docstring"""
_snake_case : Optional[Any] = namedtuple("result" , "name value" )
if (voltage, current, power).count(0 ) != 1:
raise ValueError("Only one argument must be 0" )
elif power < 0:
raise ValueError(
"Power cannot be negative in any electrical/electronics system" )
elif voltage == 0:
return result("voltage" , power / current )
elif current == 0:
return result("current" , power / voltage )
elif power == 0:
return result("power" , float(round(abs(voltage * current ) , 2 ) ) )
else:
raise ValueError("Exactly one argument must be 0" )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 87 | 1 |
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_ad_blocks import get_down_block, get_mid_block, get_out_block, get_up_block
@dataclass
class _UpperCAmelCase ( _snake_case):
__lowercase : torch.FloatTensor
class _UpperCAmelCase ( _snake_case , _snake_case):
@register_to_config
def __init__( self , snake_case_ = 6_55_36 , snake_case_ = None , snake_case_ = 2 , snake_case_ = 2 , snake_case_ = 0 , snake_case_ = "fourier" , snake_case_ = True , snake_case_ = False , snake_case_ = 0.0 , snake_case_ = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D") , snake_case_ = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip") , snake_case_ = "UNetMidBlock1D" , snake_case_ = None , snake_case_ = (32, 32, 64) , snake_case_ = None , snake_case_ = 8 , snake_case_ = 1 , snake_case_ = False , ):
super().__init__()
_snake_case : Optional[Any] = sample_size
# time
if time_embedding_type == "fourier":
_snake_case : List[Any] = GaussianFourierProjection(
embedding_size=8 , set_W_to_weight=snake_case_ , log=snake_case_ , flip_sin_to_cos=snake_case_ )
_snake_case : int = 2 * block_out_channels[0]
elif time_embedding_type == "positional":
_snake_case : Optional[Any] = Timesteps(
block_out_channels[0] , flip_sin_to_cos=snake_case_ , downscale_freq_shift=snake_case_ )
_snake_case : List[Any] = block_out_channels[0]
if use_timestep_embedding:
_snake_case : Dict = block_out_channels[0] * 4
_snake_case : str = TimestepEmbedding(
in_channels=snake_case_ , time_embed_dim=snake_case_ , act_fn=snake_case_ , out_dim=block_out_channels[0] , )
_snake_case : int = nn.ModuleList([] )
_snake_case : Any = None
_snake_case : str = nn.ModuleList([] )
_snake_case : Tuple = None
# down
_snake_case : str = in_channels
for i, down_block_type in enumerate(snake_case_ ):
_snake_case : Optional[int] = output_channel
_snake_case : List[Any] = block_out_channels[i]
if i == 0:
input_channel += extra_in_channels
_snake_case : Union[str, Any] = i == len(snake_case_ ) - 1
_snake_case : Optional[Any] = get_down_block(
snake_case_ , num_layers=snake_case_ , in_channels=snake_case_ , out_channels=snake_case_ , temb_channels=block_out_channels[0] , add_downsample=not is_final_block or downsample_each_block , )
self.down_blocks.append(snake_case_ )
# mid
_snake_case : Any = get_mid_block(
snake_case_ , in_channels=block_out_channels[-1] , mid_channels=block_out_channels[-1] , out_channels=block_out_channels[-1] , embed_dim=block_out_channels[0] , num_layers=snake_case_ , add_downsample=snake_case_ , )
# up
_snake_case : int = list(reversed(snake_case_ ) )
_snake_case : List[Any] = reversed_block_out_channels[0]
if out_block_type is None:
_snake_case : Optional[Any] = out_channels
else:
_snake_case : List[str] = block_out_channels[0]
for i, up_block_type in enumerate(snake_case_ ):
_snake_case : Any = output_channel
_snake_case : Tuple = (
reversed_block_out_channels[i + 1] if i < len(snake_case_ ) - 1 else final_upsample_channels
)
_snake_case : str = i == len(snake_case_ ) - 1
_snake_case : Union[str, Any] = get_up_block(
snake_case_ , num_layers=snake_case_ , in_channels=snake_case_ , out_channels=snake_case_ , temb_channels=block_out_channels[0] , add_upsample=not is_final_block , )
self.up_blocks.append(snake_case_ )
_snake_case : Tuple = output_channel
# out
_snake_case : List[str] = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4 , 32 )
_snake_case : Any = get_out_block(
out_block_type=snake_case_ , num_groups_out=snake_case_ , embed_dim=block_out_channels[0] , out_channels=snake_case_ , act_fn=snake_case_ , fc_dim=block_out_channels[-1] // 4 , )
def lowerCamelCase__ ( self , snake_case_ , snake_case_ , snake_case_ = True , ):
_snake_case : Optional[int] = timestep
if not torch.is_tensor(snake_case_ ):
_snake_case : List[Any] = torch.tensor([timesteps] , dtype=torch.long , device=sample.device )
elif torch.is_tensor(snake_case_ ) and len(timesteps.shape ) == 0:
_snake_case : str = timesteps[None].to(sample.device )
_snake_case : Tuple = self.time_proj(snake_case_ )
if self.config.use_timestep_embedding:
_snake_case : Any = self.time_mlp(snake_case_ )
else:
_snake_case : Tuple = timestep_embed[..., None]
_snake_case : Any = timestep_embed.repeat([1, 1, sample.shape[2]] ).to(sample.dtype )
_snake_case : int = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]) )
# 2. down
_snake_case : Optional[Any] = ()
for downsample_block in self.down_blocks:
_snake_case , _snake_case : str = downsample_block(hidden_states=snake_case_ , temb=snake_case_ )
down_block_res_samples += res_samples
# 3. mid
if self.mid_block:
_snake_case : List[str] = self.mid_block(snake_case_ , snake_case_ )
# 4. up
for i, upsample_block in enumerate(self.up_blocks ):
_snake_case : Any = down_block_res_samples[-1:]
_snake_case : Dict = down_block_res_samples[:-1]
_snake_case : Dict = upsample_block(snake_case_ , res_hidden_states_tuple=snake_case_ , temb=snake_case_ )
# 5. post-process
if self.out_block:
_snake_case : List[Any] = self.out_block(snake_case_ , snake_case_ )
if not return_dict:
return (sample,)
return UNetaDOutput(sample=snake_case_ )
| 87 |
"""simple docstring"""
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DPMSolverMultistepScheduler,
TextToVideoSDPipeline,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class _UpperCAmelCase ( _snake_case , unittest.TestCase):
__lowercase : Any = TextToVideoSDPipeline
__lowercase : str = TEXT_TO_IMAGE_PARAMS
__lowercase : int = TEXT_TO_IMAGE_BATCH_PARAMS
# No `output_type`.
__lowercase : Optional[int] = frozenset(
[
"""num_inference_steps""",
"""generator""",
"""latents""",
"""return_dict""",
"""callback""",
"""callback_steps""",
])
def lowerCamelCase__ ( self ):
torch.manual_seed(0 )
_snake_case : str = UNetaDConditionModel(
block_out_channels=(32, 64, 64, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D") , up_block_types=("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D") , cross_attention_dim=32 , attention_head_dim=4 , )
_snake_case : List[Any] = DDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule="scaled_linear" , clip_sample=snake_case_ , set_alpha_to_one=snake_case_ , )
torch.manual_seed(0 )
_snake_case : Union[str, Any] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , sample_size=1_28 , )
torch.manual_seed(0 )
_snake_case : Optional[Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act="gelu" , projection_dim=5_12 , )
_snake_case : Tuple = CLIPTextModel(snake_case_ )
_snake_case : Optional[int] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
_snake_case : Any = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
}
return components
def lowerCamelCase__ ( self , snake_case_ , snake_case_=0 ):
if str(snake_case_ ).startswith("mps" ):
_snake_case : str = torch.manual_seed(snake_case_ )
else:
_snake_case : Union[str, Any] = torch.Generator(device=snake_case_ ).manual_seed(snake_case_ )
_snake_case : str = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
"output_type": "pt",
}
return inputs
def lowerCamelCase__ ( self ):
_snake_case : int = "cpu" # ensure determinism for the device-dependent torch.Generator
_snake_case : Optional[Any] = self.get_dummy_components()
_snake_case : Tuple = TextToVideoSDPipeline(**snake_case_ )
_snake_case : List[str] = sd_pipe.to(snake_case_ )
sd_pipe.set_progress_bar_config(disable=snake_case_ )
_snake_case : int = self.get_dummy_inputs(snake_case_ )
_snake_case : Union[str, Any] = "np"
_snake_case : Dict = sd_pipe(**snake_case_ ).frames
_snake_case : Any = frames[0][-3:, -3:, -1]
assert frames[0].shape == (64, 64, 3)
_snake_case : Dict = np.array([158.0, 160.0, 153.0, 125.0, 100.0, 121.0, 111.0, 93.0, 113.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def lowerCamelCase__ ( self ):
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=snake_case_ , expected_max_diff=3E-3 )
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def lowerCamelCase__ ( self ):
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=snake_case_ , expected_max_diff=1E-2 )
@unittest.skip(reason="Batching needs to be properly figured out first for this pipeline." )
def lowerCamelCase__ ( self ):
pass
@unittest.skip(reason="Batching needs to be properly figured out first for this pipeline." )
def lowerCamelCase__ ( self ):
pass
@unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline." )
def lowerCamelCase__ ( self ):
pass
def lowerCamelCase__ ( self ):
return super().test_progress_bar()
@slow
@skip_mps
class _UpperCAmelCase ( unittest.TestCase):
def lowerCamelCase__ ( self ):
_snake_case : List[Any] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy" )
_snake_case : int = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b" )
_snake_case : str = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
_snake_case : Tuple = pipe.to("cuda" )
_snake_case : List[Any] = "Spiderman is surfing"
_snake_case : Optional[int] = torch.Generator(device="cpu" ).manual_seed(0 )
_snake_case : int = pipe(snake_case_ , generator=snake_case_ , num_inference_steps=25 , output_type="pt" ).frames
_snake_case : int = video_frames.cpu().numpy()
assert np.abs(expected_video - video ).mean() < 5E-2
def lowerCamelCase__ ( self ):
_snake_case : Any = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy" )
_snake_case : str = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b" )
_snake_case : int = pipe.to("cuda" )
_snake_case : Any = "Spiderman is surfing"
_snake_case : str = torch.Generator(device="cpu" ).manual_seed(0 )
_snake_case : Any = pipe(snake_case_ , generator=snake_case_ , num_inference_steps=2 , output_type="pt" ).frames
_snake_case : Optional[int] = video_frames.cpu().numpy()
assert np.abs(expected_video - video ).mean() < 5E-2
| 87 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_a : List[str] = {
"""configuration_longt5""": ["""LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LongT5Config""", """LongT5OnnxConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : Union[str, Any] = [
"""LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""LongT5EncoderModel""",
"""LongT5ForConditionalGeneration""",
"""LongT5Model""",
"""LongT5PreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : List[Any] = [
"""FlaxLongT5ForConditionalGeneration""",
"""FlaxLongT5Model""",
"""FlaxLongT5PreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_longta import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongTaConfig, LongTaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_longta import (
LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
LongTaEncoderModel,
LongTaForConditionalGeneration,
LongTaModel,
LongTaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_longta import (
FlaxLongTaForConditionalGeneration,
FlaxLongTaModel,
FlaxLongTaPreTrainedModel,
)
else:
import sys
_a : Optional[int] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 87 |
"""simple docstring"""
from typing import List, Optional
import numpy as np
from ...processing_utils import ProcessorMixin
from ...utils import to_numpy
class _UpperCAmelCase ( _snake_case):
__lowercase : int = """EncodecFeatureExtractor"""
__lowercase : str = ("""T5Tokenizer""", """T5TokenizerFast""")
def __init__( self , snake_case_ , snake_case_ ):
super().__init__(snake_case_ , snake_case_ )
_snake_case : Dict = self.feature_extractor
_snake_case : Any = False
def lowerCamelCase__ ( self , snake_case_=None , snake_case_=None , snake_case_=True ):
return self.tokenizer.get_decoder_prompt_ids(task=snake_case_ , language=snake_case_ , no_timestamps=snake_case_ )
def __call__( self , *snake_case_ , **snake_case_ ):
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*snake_case_ , **snake_case_ )
_snake_case : str = kwargs.pop("audio" , snake_case_ )
_snake_case : Optional[int] = kwargs.pop("sampling_rate" , snake_case_ )
_snake_case : Optional[Any] = kwargs.pop("text" , snake_case_ )
if len(snake_case_ ) > 0:
_snake_case : Any = args[0]
_snake_case : Union[str, Any] = args[1:]
if audio is None and text is None:
raise ValueError("You need to specify either an `audio` or `text` input to process." )
if text is not None:
_snake_case : Any = self.tokenizer(snake_case_ , **snake_case_ )
if audio is not None:
_snake_case : Any = self.feature_extractor(snake_case_ , *snake_case_ , sampling_rate=snake_case_ , **snake_case_ )
if audio is None:
return inputs
elif text is None:
return audio_inputs
else:
_snake_case : str = audio_inputs["input_values"]
if "padding_mask" in audio_inputs:
_snake_case : List[str] = audio_inputs["padding_mask"]
return inputs
def lowerCamelCase__ ( self , *snake_case_ , **snake_case_ ):
_snake_case : Tuple = kwargs.pop("audio" , snake_case_ )
_snake_case : List[str] = kwargs.pop("padding_mask" , snake_case_ )
if len(snake_case_ ) > 0:
_snake_case : Tuple = args[0]
_snake_case : Dict = args[1:]
if audio_values is not None:
return self._decode_audio(snake_case_ , padding_mask=snake_case_ )
else:
return self.tokenizer.batch_decode(*snake_case_ , **snake_case_ )
def lowerCamelCase__ ( self , *snake_case_ , **snake_case_ ):
return self.tokenizer.decode(*snake_case_ , **snake_case_ )
def lowerCamelCase__ ( self , snake_case_ , snake_case_ = None ):
_snake_case : Optional[int] = to_numpy(snake_case_ )
_snake_case , _snake_case , _snake_case : Tuple = audio_values.shape
if padding_mask is None:
return list(snake_case_ )
_snake_case : Optional[int] = to_numpy(snake_case_ )
# match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding**
# token (so that the generated audio values are **not** treated as padded tokens)
_snake_case : Any = seq_len - padding_mask.shape[-1]
_snake_case : Optional[Any] = 1 - self.feature_extractor.padding_value
_snake_case : Optional[int] = np.pad(snake_case_ , ((0, 0), (0, difference)) , "constant" , constant_values=snake_case_ )
_snake_case : Any = audio_values.tolist()
for i in range(snake_case_ ):
_snake_case : Tuple = np.asarray(audio_values[i] )[
padding_mask[i][None, :] != self.feature_extractor.padding_value
]
_snake_case : Tuple = sliced_audio.reshape(snake_case_ , -1 )
return audio_values
| 87 | 1 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
import torch.nn.functional as F
from transformers import (
ClapTextConfig,
ClapTextModelWithProjection,
RobertaTokenizer,
SpeechTaHifiGan,
SpeechTaHifiGanConfig,
)
from diffusers import (
AudioLDMPipeline,
AutoencoderKL,
DDIMScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class _UpperCAmelCase ( _snake_case , unittest.TestCase):
__lowercase : Tuple = AudioLDMPipeline
__lowercase : str = TEXT_TO_AUDIO_PARAMS
__lowercase : str = TEXT_TO_AUDIO_BATCH_PARAMS
__lowercase : Optional[int] = frozenset(
[
"""num_inference_steps""",
"""num_waveforms_per_prompt""",
"""generator""",
"""latents""",
"""output_type""",
"""return_dict""",
"""callback""",
"""callback_steps""",
])
def lowerCamelCase__ ( self ):
torch.manual_seed(0 )
_snake_case : Union[str, Any] = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=(32, 64) , class_embed_type="simple_projection" , projection_class_embeddings_input_dim=32 , class_embeddings_concat=snake_case_ , )
_snake_case : List[Any] = DDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule="scaled_linear" , clip_sample=snake_case_ , set_alpha_to_one=snake_case_ , )
torch.manual_seed(0 )
_snake_case : Dict = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=1 , out_channels=1 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
torch.manual_seed(0 )
_snake_case : int = ClapTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , projection_dim=32 , )
_snake_case : Tuple = ClapTextModelWithProjection(snake_case_ )
_snake_case : Union[str, Any] = RobertaTokenizer.from_pretrained("hf-internal-testing/tiny-random-roberta" , model_max_length=77 )
_snake_case : int = SpeechTaHifiGanConfig(
model_in_dim=8 , sampling_rate=1_60_00 , upsample_initial_channel=16 , upsample_rates=[2, 2] , upsample_kernel_sizes=[4, 4] , resblock_kernel_sizes=[3, 7] , resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]] , normalize_before=snake_case_ , )
_snake_case : Tuple = SpeechTaHifiGan(snake_case_ )
_snake_case : Optional[int] = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"vocoder": vocoder,
}
return components
def get_dummy_inputs(self, device, seed=0):
    """Deterministic default call kwargs for the pipeline.

    Fixes the original duplicate ``snake_case_`` parameter names (a SyntaxError)
    and is renamed so sibling tests calling ``self.get_dummy_inputs(device)`` work.
    """
    if str(device).startswith("mps"):
        # MPS does not support device-bound generators.
        generator = torch.manual_seed(seed)
    else:
        generator = torch.Generator(device=device).manual_seed(seed)
    inputs = {
        "prompt": "A hammer hitting a wooden surface",
        "generator": generator,
        "num_inference_steps": 2,
        "guidance_scale": 6.0,
    }
    return inputs
def test_audioldm_ddim(self):
    """Smoke-test the default (DDIM) pipeline on CPU against a reference audio slice.

    Restores the real local bindings (originally every assignment went to a
    throwaway name, so ``audioldm_pipe``/``inputs``/``output`` were unbound).
    """
    device = "cpu"  # ensure determinism for the device-dependent torch.Generator
    components = self.get_dummy_components()
    audioldm_pipe = AudioLDMPipeline(**components)
    audioldm_pipe = audioldm_pipe.to(device)
    audioldm_pipe.set_progress_bar_config(disable=None)
    inputs = self.get_dummy_inputs(device)
    output = audioldm_pipe(**inputs)
    audio = output.audios[0]
    assert audio.ndim == 1
    assert len(audio) == 256
    # Compare the first ten samples against a pinned reference.
    audio_slice = audio[:10]
    expected_slice = np.array(
        [-0.0050, 0.0050, -0.0060, 0.0033, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0033])
    assert np.abs(audio_slice - expected_slice).max() < 1e-2
def lowerCamelCase__ ( self ):
    # Checks that passing pre-computed `prompt_embeds` produces the same audio as
    # passing the raw text prompt (pipeline-internal text encoding must match).
    # NOTE(review): assignment targets in this block were mangled to `_snake_case`,
    # so names read later (audioldm_pipe, inputs, prompt, text_inputs,
    # prompt_embeds, output, ...) are never bound, and the final assertion
    # subtracts `audio_a` from itself (originally two distinct audio buffers).
    # Restore the real bindings before relying on this test.
    _snake_case : int = self.get_dummy_components()
    _snake_case : Any = AudioLDMPipeline(**snake_case_ )
    _snake_case : Any = audioldm_pipe.to(snake_case_ )
    _snake_case : List[str] = audioldm_pipe.to(snake_case_ )
    audioldm_pipe.set_progress_bar_config(disable=snake_case_ )
    _snake_case : Union[str, Any] = self.get_dummy_inputs(snake_case_ )
    _snake_case : str = 3 * [inputs["prompt"]]
    # forward with raw text prompts
    _snake_case : Dict = audioldm_pipe(**snake_case_ )
    _snake_case : List[str] = output.audios[0]
    _snake_case : Optional[int] = self.get_dummy_inputs(snake_case_ )
    _snake_case : str = 3 * [inputs.pop("prompt" )]
    # Re-encode the prompts manually (tokenize -> text encoder -> projection).
    _snake_case : Optional[Any] = audioldm_pipe.tokenizer(
        snake_case_ , padding="max_length" , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=snake_case_ , return_tensors="pt" , )
    _snake_case : Dict = text_inputs["input_ids"].to(snake_case_ )
    _snake_case : Dict = audioldm_pipe.text_encoder(
        snake_case_ , )
    _snake_case : str = prompt_embeds.text_embeds
    # additional L_2 normalization over each hidden-state
    _snake_case : Optional[int] = F.normalize(snake_case_ , dim=-1 )
    _snake_case : List[Any] = prompt_embeds
    # forward with pre-computed embeddings
    _snake_case : Tuple = audioldm_pipe(**snake_case_ )
    _snake_case : Optional[int] = output.audios[0]
    assert np.abs(audio_a - audio_a ).max() < 1E-2
def lowerCamelCase__ ( self ):
    # Same check as the previous test, but for pre-computed *negative* prompt
    # embeddings: manual encoding of (prompt, negative_prompt) must match the
    # pipeline-internal path.
    # NOTE(review): same mangling as above — `_snake_case` assignment targets
    # leave later names (audioldm_pipe, inputs, embeds, audio_a, ...) unbound,
    # and the final assertion compares `audio_a` with itself.
    _snake_case : Any = self.get_dummy_components()
    _snake_case : str = AudioLDMPipeline(**snake_case_ )
    _snake_case : str = audioldm_pipe.to(snake_case_ )
    _snake_case : List[Any] = audioldm_pipe.to(snake_case_ )
    audioldm_pipe.set_progress_bar_config(disable=snake_case_ )
    _snake_case : str = self.get_dummy_inputs(snake_case_ )
    _snake_case : Any = 3 * ["this is a negative prompt"]
    _snake_case : int = negative_prompt
    _snake_case : Any = 3 * [inputs["prompt"]]
    # forward with raw text prompts
    _snake_case : int = audioldm_pipe(**snake_case_ )
    _snake_case : List[str] = output.audios[0]
    _snake_case : str = self.get_dummy_inputs(snake_case_ )
    _snake_case : str = 3 * [inputs.pop("prompt" )]
    _snake_case : Optional[int] = []
    # Encode prompt and negative prompt the same way the pipeline does.
    for p in [prompt, negative_prompt]:
        _snake_case : str = audioldm_pipe.tokenizer(
            snake_case_ , padding="max_length" , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=snake_case_ , return_tensors="pt" , )
        _snake_case : Tuple = text_inputs["input_ids"].to(snake_case_ )
        _snake_case : Union[str, Any] = audioldm_pipe.text_encoder(
            snake_case_ , )
        _snake_case : List[str] = text_embeds.text_embeds
        # additional L_2 normalization over each hidden-state
        _snake_case : int = F.normalize(snake_case_ , dim=-1 )
        embeds.append(snake_case_ )
    _snake_case , _snake_case : List[Any] = embeds
    # forward with pre-computed embeddings
    _snake_case : str = audioldm_pipe(**snake_case_ )
    _snake_case : Tuple = output.audios[0]
    assert np.abs(audio_a - audio_a ).max() < 1E-2
def test_audioldm_negative_prompt(self):
    """Check generation with a `negative_prompt` against a pinned reference slice.

    Restores the mangled local bindings; the PNDM scheduler replaces the default
    one as in the upstream test.
    """
    device = "cpu"  # ensure determinism for the device-dependent torch.Generator
    components = self.get_dummy_components()
    components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
    audioldm_pipe = AudioLDMPipeline(**components)
    audioldm_pipe = audioldm_pipe.to(device)
    audioldm_pipe.set_progress_bar_config(disable=None)
    inputs = self.get_dummy_inputs(device)
    negative_prompt = "egg cracking"
    output = audioldm_pipe(**inputs, negative_prompt=negative_prompt)
    audio = output.audios[0]
    assert audio.ndim == 1
    assert len(audio) == 256
    audio_slice = audio[:10]
    expected_slice = np.array(
        [-0.0051, 0.0050, -0.0060, 0.0034, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0032])
    assert np.abs(audio_slice - expected_slice).max() < 1e-2
def test_audioldm_num_waveforms_per_prompt(self):
    """Check output batch shapes for every (batch_size, num_waveforms_per_prompt) combination."""
    device = "cpu"  # ensure determinism for the device-dependent torch.Generator
    components = self.get_dummy_components()
    components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
    audioldm_pipe = AudioLDMPipeline(**components)
    audioldm_pipe = audioldm_pipe.to(device)
    audioldm_pipe.set_progress_bar_config(disable=None)
    prompt = "A hammer hitting a wooden surface"
    # test num_waveforms_per_prompt=1 (default)
    audios = audioldm_pipe(prompt, num_inference_steps=2).audios
    assert audios.shape == (1, 256)
    # test num_waveforms_per_prompt=1 (default) for batch of prompts
    batch_size = 2
    audios = audioldm_pipe([prompt] * batch_size, num_inference_steps=2).audios
    assert audios.shape == (batch_size, 256)
    # test num_waveforms_per_prompt for single prompt
    num_waveforms_per_prompt = 2
    audios = audioldm_pipe(prompt, num_inference_steps=2, num_waveforms_per_prompt=num_waveforms_per_prompt).audios
    assert audios.shape == (num_waveforms_per_prompt, 256)
    # test num_waveforms_per_prompt for batch of prompts
    batch_size = 2
    audios = audioldm_pipe(
        [prompt] * batch_size, num_inference_steps=2, num_waveforms_per_prompt=num_waveforms_per_prompt).audios
    assert audios.shape == (batch_size * num_waveforms_per_prompt, 256)
def test_audioldm_audio_length_in_s(self):
    """Check that `audio_length_in_s` controls the output duration exactly
    (sample count / vocoder sampling rate)."""
    device = "cpu"  # ensure determinism for the device-dependent torch.Generator
    components = self.get_dummy_components()
    audioldm_pipe = AudioLDMPipeline(**components)
    audioldm_pipe = audioldm_pipe.to(device)
    audioldm_pipe.set_progress_bar_config(disable=None)
    vocoder_sampling_rate = audioldm_pipe.vocoder.config.sampling_rate
    inputs = self.get_dummy_inputs(device)
    output = audioldm_pipe(audio_length_in_s=0.016, **inputs)
    audio = output.audios[0]
    assert audio.ndim == 1
    assert len(audio) / vocoder_sampling_rate == 0.016
    output = audioldm_pipe(audio_length_in_s=0.032, **inputs)
    audio = output.audios[0]
    assert audio.ndim == 1
    assert len(audio) / vocoder_sampling_rate == 0.032
def test_audioldm_vocoder_model_in_dim(self):
    """Swapping in a vocoder with twice the mel channels must not change the waveform shape."""
    components = self.get_dummy_components()
    audioldm_pipe = AudioLDMPipeline(**components)
    # NOTE(review): `torch_device` is assumed to be imported at module level, as
    # is standard in diffusers test files — confirm against the file header.
    audioldm_pipe = audioldm_pipe.to(torch_device)
    audioldm_pipe.set_progress_bar_config(disable=None)
    prompt = ["hey"]
    output = audioldm_pipe(prompt, num_inference_steps=1)
    audio_shape = output.audios.shape
    assert audio_shape == (1, 256)
    config = audioldm_pipe.vocoder.config
    config.model_in_dim *= 2
    audioldm_pipe.vocoder = SpeechTaHifiGan(config).to(torch_device)
    output = audioldm_pipe(prompt, num_inference_steps=1)
    audio_shape = output.audios.shape
    # waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram
    assert audio_shape == (1, 256)
def test_attention_slicing_forward_pass(self):
    # Audio outputs are compared elementwise; the mean-pixel-difference image
    # check does not apply, so it is disabled (the original passed an undefined
    # name here).
    self._test_attention_slicing_forward_pass(test_mean_pixel_difference=False)
def test_inference_batch_single_identical(self):
    # Audio pipeline: skip the image-specific mean-pixel-difference check (the
    # original passed an undefined name here).
    self._test_inference_batch_single_identical(test_mean_pixel_difference=False)
@unittest.skipIf(
    torch_device != "cuda" or not is_xformers_available(),
    reason="XFormers attention is only available with CUDA and `xformers` installed",
)
def test_xformers_attention_forwardGenerator_pass(self):
    # Audio pipeline: skip the image-specific mean-pixel-difference check (the
    # original passed an undefined name here).
    self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False)
@slow
class _UpperCAmelCase(unittest.TestCase):
    """Integration tests for the full `cvssp/audioldm` checkpoint (slow; GPU expected).

    Method names are restored: the obfuscated version gave every method the same
    name (so they shadowed each other, unittest discovered none of them, and
    `self.get_inputs` was called but never defined) and declared duplicate
    parameter names (a SyntaxError).
    """

    def tearDown(self):
        # Free GPU memory between tests.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        """Reproducible call kwargs: fixed RNG latents of shape (1, 8, 128, 16)."""
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 8, 128, 16))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "A hammer hitting a wooden surface",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 2.5,
        }
        return inputs

    def test_audioldm(self):
        """Default scheduler, 25 steps; compare a pinned slice of the waveform."""
        audioldm_pipe = AudioLDMPipeline.from_pretrained("cvssp/audioldm")
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_inputs(torch_device)
        inputs["num_inference_steps"] = 25
        audio = audioldm_pipe(**inputs).audios[0]
        assert audio.ndim == 1
        assert len(audio) == 81920
        audio_slice = audio[77230:77240]
        expected_slice = np.array(
            [-0.4884, -0.4607, 0.0023, 0.5007, 0.5896, 0.5151, 0.3813, -0.0208, -0.3687, -0.4315])
        max_diff = np.abs(expected_slice - audio_slice).max()
        assert max_diff < 1e-2

    def test_audioldm_lms(self):
        """Same checkpoint with an LMS scheduler swapped in (the original discarded it)."""
        audioldm_pipe = AudioLDMPipeline.from_pretrained("cvssp/audioldm")
        audioldm_pipe.scheduler = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_inputs(torch_device)
        audio = audioldm_pipe(**inputs).audios[0]
        assert audio.ndim == 1
        assert len(audio) == 81920
        audio_slice = audio[27780:27790]
        expected_slice = np.array([-0.2131, -0.0873, -0.0124, -0.0189, 0.0569, 0.1373, 0.1883, 0.2886, 0.3297, 0.2212])
        max_diff = np.abs(expected_slice - audio_slice).max()
        assert max_diff < 3e-2
| 87 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_a : str = {"""configuration_yolos""": ["""YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP""", """YolosConfig""", """YolosOnnxConfig"""]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : Optional[Any] = ["""YolosFeatureExtractor"""]
_a : List[Any] = ["""YolosImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : Union[str, Any] = [
"""YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""YolosForObjectDetection""",
"""YolosModel""",
"""YolosPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_yolos import YolosFeatureExtractor
from .image_processing_yolos import YolosImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_yolos import (
YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
YolosForObjectDetection,
YolosModel,
YolosPreTrainedModel,
)
else:
import sys
_a : Union[str, Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 87 | 1 |
"""simple docstring"""
import unittest
from .lib import (
Matrix,
Vector,
axpy,
square_zero_matrix,
unit_basis_vector,
zero_vector,
)
class _UpperCAmelCase(unittest.TestCase):
    """Unit tests for the linear-algebra `Vector`/`Matrix` library in `.lib`.

    Methods are renamed to `test_*` so unittest discovery actually runs them:
    the obfuscated version gave every method the same name, so each definition
    shadowed the previous one and no test was ever collected.
    """

    def test_component(self):
        # Read individual components; an empty Vector must also construct cleanly.
        x = Vector([1, 2, 3])
        self.assertEqual(x.component(0), 1)
        self.assertEqual(x.component(2), 3)
        _ = Vector()

    def test_str(self):
        x = Vector([0, 0, 0, 0, 0, 1])
        self.assertEqual(str(x), "(0,0,0,0,0,1)")

    def test_size(self):
        x = Vector([1, 2, 3, 4])
        self.assertEqual(len(x), 4)

    def test_euclidean_length(self):
        x = Vector([1, 2])
        y = Vector([1, 2, 3, 4, 5])
        z = Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
        w = Vector([1, -1, 1, -1, 2, -3, 4, -5])
        self.assertAlmostEqual(x.euclidean_length(), 2.236, 3)
        self.assertAlmostEqual(y.euclidean_length(), 7.416, 3)
        self.assertEqual(z.euclidean_length(), 0)
        self.assertAlmostEqual(w.euclidean_length(), 7.616, 3)

    def test_add(self):
        x = Vector([1, 2, 3])
        y = Vector([1, 1, 1])
        self.assertEqual((x + y).component(0), 2)
        self.assertEqual((x + y).component(1), 3)
        self.assertEqual((x + y).component(2), 4)

    def test_sub(self):
        x = Vector([1, 2, 3])
        y = Vector([1, 1, 1])
        self.assertEqual((x - y).component(0), 0)
        self.assertEqual((x - y).component(1), 1)
        self.assertEqual((x - y).component(2), 2)

    def test_mul(self):
        x = Vector([1, 2, 3])
        a = Vector([2, -1, 4])  # for test of dot product
        b = Vector([1, -2, -1])
        self.assertEqual(str(x * 3.0), "(3.0,6.0,9.0)")
        self.assertEqual((a * b), 0)

    def test_zero_vector(self):
        self.assertEqual(str(zero_vector(10)).count("0"), 10)

    def test_unit_basis_vector(self):
        self.assertEqual(str(unit_basis_vector(3, 1)), "(0,1,0)")

    def test_axpy(self):
        # axpy computes a*x + y.
        x = Vector([1, 2, 3])
        y = Vector([1, 0, 1])
        self.assertEqual(str(axpy(2, x, y)), "(3,4,7)")

    def test_copy(self):
        x = Vector([1, 0, 0, 0, 0, 0])
        y = x.copy()
        self.assertEqual(str(x), str(y))

    def test_change_component(self):
        x = Vector([1, 0, 0])
        x.change_component(0, 0)
        x.change_component(1, 1)
        self.assertEqual(str(x), "(0,1,0)")

    def test_str_matrix(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual("|1,2,3|\n|2,4,5|\n|6,7,8|\n", str(a))

    def test_minor(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        minors = [[-3, -14, -10], [-5, -10, -5], [-2, -1, 0]]
        for x in range(a.height()):
            for y in range(a.width()):
                self.assertEqual(minors[x][y], a.minor(x, y))

    def test_cofactor(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        cofactors = [[-3, 14, -10], [5, -10, 5], [-2, 1, 0]]
        for x in range(a.height()):
            for y in range(a.width()):
                self.assertEqual(cofactors[x][y], a.cofactor(x, y))

    def test_determinant(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual(-5, a.determinant())

    def test_matrix_vector_and_scalar_mul(self):
        a = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]], 3, 3)
        x = Vector([1, 2, 3])
        self.assertEqual("(14,32,50)", str(a * x))
        self.assertEqual("|2,4,6|\n|8,10,12|\n|14,16,18|\n", str(a * 2))

    def test_change_component_matrix(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        a.change_component(0, 2, 5)
        self.assertEqual("|1,2,5|\n|2,4,5|\n|6,7,8|\n", str(a))

    def test_component_matrix(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        # Was `assertEqual(7, ..., 0.01)`: the third positional argument of
        # assertEqual is `msg`, not a tolerance — use assertAlmostEqual instead.
        self.assertAlmostEqual(7, a.component(2, 1), delta=0.01)

    def test_matrix_add(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        b = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]], 3, 3)
        self.assertEqual("|2,4,10|\n|4,8,10|\n|12,14,18|\n", str(a + b))

    def test_matrix_sub(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        b = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]], 3, 3)
        self.assertEqual("|0,0,-4|\n|0,0,0|\n|0,0,-2|\n", str(a - b))

    def test_square_zero_matrix(self):
        self.assertEqual(
            "|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n",
            str(square_zero_matrix(5)),
        )
if __name__ == "__main__":
unittest.main()
| 87 |
"""simple docstring"""
import numpy as np
import torch
from torch.utils.data import Dataset, IterableDataset
from ..utils.generic import ModelOutput
class _UpperCAmelCase ( _snake_case):
def __init__( self , snake_case_ , snake_case_ , snake_case_ ):
_snake_case : Optional[int] = dataset
_snake_case : str = process
_snake_case : int = params
def __len__( self ):
return len(self.dataset )
def __getitem__( self , snake_case_ ):
_snake_case : Union[str, Any] = self.dataset[i]
_snake_case : Optional[Any] = self.process(snake_case_ , **self.params )
return processed
class _UpperCAmelCase ( _snake_case):
def __init__( self , snake_case_ , snake_case_ , snake_case_ , snake_case_=None ):
_snake_case : Union[str, Any] = loader
_snake_case : Tuple = infer
_snake_case : List[Any] = params
if loader_batch_size == 1:
# Let's spare some time by deactivating altogether
_snake_case : int = None
_snake_case : int = loader_batch_size
# Internal bookkeeping
_snake_case : Any = None
_snake_case : Dict = None
def __len__( self ):
return len(self.loader )
def __iter__( self ):
_snake_case : int = iter(self.loader )
return self
def lowerCamelCase__ ( self ):
if isinstance(self._loader_batch_data , torch.Tensor ):
# Batch data is simple tensor, just fetch the slice
_snake_case : List[Any] = self._loader_batch_data[self._loader_batch_index]
else:
# Batch data is assumed to be BaseModelOutput (or dict)
_snake_case : int = {}
for k, element in self._loader_batch_data.items():
if isinstance(snake_case_ , snake_case_ ):
# Convert ModelOutput to tuple first
_snake_case : Tuple = element.to_tuple()
if isinstance(element[0] , torch.Tensor ):
_snake_case : Union[str, Any] = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element )
elif isinstance(element[0] , np.ndarray ):
_snake_case : int = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element )
continue
if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(snake_case_ , snake_case_ ):
# Those are stored as lists of tensors so need specific unbatching.
if isinstance(element[0] , torch.Tensor ):
_snake_case : Tuple = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element )
elif isinstance(element[0] , np.ndarray ):
_snake_case : Tuple = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element )
continue
if element is None:
# This can happen for optional data that get passed around
_snake_case : Tuple = None
elif isinstance(element[self._loader_batch_index] , torch.Tensor ):
# Take correct batch data, but make it looked like batch_size=1
# For compatibility with other methods within transformers
_snake_case : List[Any] = element[self._loader_batch_index].unsqueeze(0 )
elif isinstance(element[self._loader_batch_index] , np.ndarray ):
# Take correct batch data, but make it looked like batch_size=1
# For compatibility with other methods within transformers
_snake_case : Union[str, Any] = np.expand_dims(element[self._loader_batch_index] , 0 )
else:
# This is typically a list, so no need to `unsqueeze`.
_snake_case : List[Any] = element[self._loader_batch_index]
# Recreate the element by reusing the original class to make it look
# batch_size=1
_snake_case : int = self._loader_batch_data.__class__(snake_case_ )
self._loader_batch_index += 1
return result
def lowerCamelCase__ ( self ):
if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
# We are currently unrolling a batch so we just need to return
# the current item within a batch
return self.loader_batch_item()
# We're out of items within a batch
_snake_case : Tuple = next(self.iterator )
_snake_case : Any = self.infer(snake_case_ , **self.params )
# We now have a batch of "inferred things".
if self.loader_batch_size is not None:
# Try to infer the size of the batch
if isinstance(snake_case_ , torch.Tensor ):
_snake_case : Union[str, Any] = processed
else:
_snake_case : Optional[int] = list(processed.keys() )[0]
_snake_case : List[str] = processed[key]
if isinstance(snake_case_ , snake_case_ ):
_snake_case : Dict = len(snake_case_ )
else:
_snake_case : Optional[int] = first_tensor.shape[0]
if 0 < observed_batch_size < self.loader_batch_size:
# could be last batch so we can't unroll as many
# elements.
_snake_case : Union[str, Any] = observed_batch_size
# Setting internal index to unwrap the batch
_snake_case : str = processed
_snake_case : List[Any] = 0
return self.loader_batch_item()
else:
# We're not unrolling batches
return processed
class _UpperCAmelCase ( _snake_case):
    # Chains the per-item sub-iterators produced by `infer` into one flat stream
    # (flattening "lists of lists" with generators).
    # NOTE(review): this block is mangled — the base class `_snake_case` is
    # undefined (presumably the PipelineIterator class above), `__init__`
    # declares duplicate `snake_case_` parameters (a SyntaxError), and locals
    # assigned to `_snake_case` never populate the `self.iterator` /
    # `self.subiterator` attributes read below. The bindings must be restored
    # before this class can work.
    def __init__( self , snake_case_ , snake_case_ , snake_case_ , snake_case_=None ):
        super().__init__(snake_case_ , snake_case_ , snake_case_ )
    def __iter__( self ):
        _snake_case : Tuple = iter(self.loader )
        _snake_case : List[Any] = None  # reset the sub-iterator at the start of iteration
        return self
    def lowerCamelCase__ ( self ):
        # NOTE(review): presumably meant to be `__next__` — without it the
        # iterator protocol promised by `__iter__` returning `self` is broken.
        if self.subiterator is None:
            _snake_case : Optional[Any] = self.infer(next(self.iterator ) , **self.params )
        try:
            # Try to return next item
            _snake_case : Union[str, Any] = next(self.subiterator )
        except StopIteration:
            # When a preprocess iterator ends, we can start looking at the next item
            # ChunkIterator will keep feeding until ALL elements of iterator
            # all have created their subiterator and have been iterating against.
            #
            # Another way to look at it, is we're basically flattening lists of lists
            # into a single list, but with generators
            _snake_case : str = self.infer(next(self.iterator ) , **self.params )
            _snake_case : Tuple = next(self.subiterator )
        return processed
class _UpperCAmelCase ( _snake_case):
    # Regroups a flattened stream back into lists bounded by `is_last` markers,
    # so `process` and `postprocess` see the original `process` boundaries.
    # NOTE(review): mangled block — the base class `_snake_case` is undefined
    # (presumably the PipelineIterator class above), and the `_snake_case`
    # assignment targets leave the names read below (is_last, accumulator,
    # item, processed, key, first_tensor, observed_batch_size) unbound. The
    # batch-unrolling logic mirrors PipelineIterator.__next__.
    def __iter__( self ):
        _snake_case : Optional[Any] = iter(self.loader )
        return self
    def lowerCamelCase__ ( self ):
        # NOTE(review): presumably meant to be `__next__`.
        # Extremely similar to PipelineIterator in its unpacking mechanism
        # BUT, we have an extra required item which is the presence of `is_last`
        # That is because everything is flattened by `PipelineChunkIterator` we
        # need to keep track of how to regroup here in the original `process`
        # boundaries so that `process` and `postprocess` see the same data.
        # This iterator accumulates items (possibly while unbatching) until it
        # hits an `is_last` and then just passes it on to the caller.
        _snake_case : Optional[Any] = False
        _snake_case : Tuple = []
        # Finish unrolling any batch left over from the previous call.
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            while self._loader_batch_index < self.loader_batch_size:
                _snake_case : Union[str, Any] = self.loader_batch_item()
                _snake_case : str = item.pop("is_last" )
                accumulator.append(snake_case_ )
                if is_last:
                    return accumulator
        # Keep accumulating items until an `is_last` marker is seen.
        while not is_last:
            _snake_case : List[str] = self.infer(next(self.iterator ) , **self.params )
            if self.loader_batch_size is not None:
                # Infer the observed batch size from the first tensor.
                if isinstance(snake_case_ , torch.Tensor ):
                    _snake_case : Union[str, Any] = processed
                else:
                    _snake_case : Tuple = list(processed.keys() )[0]
                    _snake_case : Tuple = processed[key]
                if isinstance(snake_case_ , snake_case_ ):
                    _snake_case : Any = len(snake_case_ )
                else:
                    _snake_case : List[Any] = first_tensor.shape[0]
                if 0 < observed_batch_size < self.loader_batch_size:
                    # could be last batch so we can't unroll as many
                    # elements.
                    _snake_case : Dict = observed_batch_size
                _snake_case : List[Any] = processed
                _snake_case : List[str] = 0
                while self._loader_batch_index < self.loader_batch_size:
                    _snake_case : Union[str, Any] = self.loader_batch_item()
                    _snake_case : int = item.pop("is_last" )
                    accumulator.append(snake_case_ )
                    if is_last:
                        return accumulator
            else:
                _snake_case : Dict = processed
                _snake_case : Dict = item.pop("is_last" )
                accumulator.append(snake_case_ )
        return accumulator
class _UpperCAmelCase ( _snake_case):
def __init__( self , snake_case_ , snake_case_ ):
_snake_case : str = dataset
_snake_case : Any = key
def __len__( self ):
return len(self.dataset )
def __getitem__( self , snake_case_ ):
return self.dataset[i][self.key]
class _UpperCAmelCase ( _snake_case):
def __init__( self , snake_case_ , snake_case_ , snake_case_ ):
_snake_case : int = dataset
_snake_case : Any = keya
_snake_case : int = keya
def __len__( self ):
return len(self.dataset )
def __getitem__( self , snake_case_ ):
return {"text": self.dataset[i][self.keya], "text_pair": self.dataset[i][self.keya]}
| 87 | 1 |
"""simple docstring"""
from math import factorial
def a__ ( a : int , a : int , a : float ):
"""simple docstring"""
if successes > trials:
raise ValueError("successes must be lower or equal to trials" )
if trials < 0 or successes < 0:
raise ValueError("the function is defined for non-negative integers" )
if not isinstance(a , a ) or not isinstance(a , a ):
raise ValueError("the function is defined for non-negative integers" )
if not 0 < prob < 1:
raise ValueError("prob has to be in range of 1 - 0" )
_snake_case : Union[str, Any] = (prob**successes) * ((1 - prob) ** (trials - successes))
# Calculate the binomial coefficient: n! / k!(n-k)!
_snake_case : Dict = float(factorial(a ) )
coefficient /= factorial(a ) * factorial(trials - successes )
return probability * coefficient
if __name__ == "__main__":
from doctest import testmod
testmod()
print("""Probability of 2 successes out of 4 trails""")
print("""with probability of 0.75 is:""", end=""" """)
print(binomial_distribution(2, 4, 0.75))
| 87 |
"""simple docstring"""
def a__ ( a : int ):
"""simple docstring"""
if not isinstance(a , a ):
raise TypeError("Input value must be an 'int' type" )
_snake_case : Union[str, Any] = 0
while number:
position += 1
number >>= 1
return position
if __name__ == "__main__":
import doctest
doctest.testmod()
| 87 | 1 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import (
BitConfig,
ViTHybridConfig,
ViTHybridForImageClassification,
ViTHybridImageProcessor,
ViTHybridModel,
)
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
# Module-level logger for this conversion script.
logging.set_verbosity_info()
_a : Any = logging.get_logger(__name__)
def a__(config, base_model: bool = False):
    """Build the list of (timm key, HF key) pairs for renaming a ViT-hybrid
    checkpoint's state dict.

    Fixes the obfuscated original: both parameters were named ``a`` (a
    SyntaxError), and in the ``base_model`` branch the "vit."-stripped list was
    assigned to a throwaway name and discarded.

    Args:
        config: ViTHybridConfig with `num_hidden_layers` and
            `backbone_config.depths`.
        base_model: if True, keys target a bare backbone (no "vit." prefix,
            pooler instead of classifier head).
    """
    rename_keys = []
    # fmt: off
    # stem:
    rename_keys.append(("cls_token", "vit.embeddings.cls_token"))
    rename_keys.append(("pos_embed", "vit.embeddings.position_embeddings"))
    rename_keys.append(("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"))
    rename_keys.append(("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"))
    # backbone
    rename_keys.append(("patch_embed.backbone.stem.conv.weight", "vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight"))
    rename_keys.append(("patch_embed.backbone.stem.norm.weight", "vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight"))
    rename_keys.append(("patch_embed.backbone.stem.norm.bias", "vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias"))
    for stage_idx in range(len(config.backbone_config.depths)):
        for layer_idx in range(config.backbone_config.depths[stage_idx]):
            rename_keys.append((f'patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight', f'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight'))
            rename_keys.append((f'patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight', f'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight'))
            rename_keys.append((f'patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias', f'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias'))
            rename_keys.append((f'patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight', f'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight'))
            rename_keys.append((f'patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight', f'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight'))
            rename_keys.append((f'patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias', f'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias'))
            rename_keys.append((f'patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight', f'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight'))
            rename_keys.append((f'patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight', f'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight'))
            rename_keys.append((f'patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias', f'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias'))
        rename_keys.append((f'patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight', f'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight'))
        rename_keys.append((f'patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight', f'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight'))
        rename_keys.append((f'patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias', f'vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias'))
    # transformer encoder
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f'blocks.{i}.norm1.weight', f'vit.encoder.layer.{i}.layernorm_before.weight'))
        rename_keys.append((f'blocks.{i}.norm1.bias', f'vit.encoder.layer.{i}.layernorm_before.bias'))
        rename_keys.append((f'blocks.{i}.attn.proj.weight', f'vit.encoder.layer.{i}.attention.output.dense.weight'))
        rename_keys.append((f'blocks.{i}.attn.proj.bias', f'vit.encoder.layer.{i}.attention.output.dense.bias'))
        rename_keys.append((f'blocks.{i}.norm2.weight', f'vit.encoder.layer.{i}.layernorm_after.weight'))
        rename_keys.append((f'blocks.{i}.norm2.bias', f'vit.encoder.layer.{i}.layernorm_after.bias'))
        rename_keys.append((f'blocks.{i}.mlp.fc1.weight', f'vit.encoder.layer.{i}.intermediate.dense.weight'))
        rename_keys.append((f'blocks.{i}.mlp.fc1.bias', f'vit.encoder.layer.{i}.intermediate.dense.bias'))
        rename_keys.append((f'blocks.{i}.mlp.fc2.weight', f'vit.encoder.layer.{i}.output.dense.weight'))
        rename_keys.append((f'blocks.{i}.mlp.fc2.bias', f'vit.encoder.layer.{i}.output.dense.bias'))
    if base_model:
        # layernorm + pooler
        rename_keys.extend(
            [
                ("norm.weight", "layernorm.weight"),
                ("norm.bias", "layernorm.bias"),
                ("pre_logits.fc.weight", "pooler.dense.weight"),
                ("pre_logits.fc.bias", "pooler.dense.bias"),
            ])
        # if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
    else:
        # layernorm + classification head
        rename_keys.extend(
            [
                ("norm.weight", "vit.layernorm.weight"),
                ("norm.bias", "vit.layernorm.bias"),
                ("head.weight", "classifier.weight"),
                ("head.bias", "classifier.bias"),
            ])
    # fmt: on
    return rename_keys
def a__(state_dict, config, base_model=False):
    """Split each timm fused QKV projection into separate query/key/value entries.

    For every transformer layer, pops ``blocks.{i}.attn.qkv.{weight,bias}`` from
    ``state_dict`` and writes the three ``hidden_size``-row slices back under the
    HuggingFace ViT naming scheme (prefixed with ``vit.`` unless ``base_model``).
    Operates on ``state_dict`` in place.

    The original declared all three parameters as ``a`` (duplicate parameter names
    are a SyntaxError) and assigned every slice to a throwaway ``_snake_case``
    local; this restores the intended in-place state-dict rewrite.
    """
    for i in range(config.num_hidden_layers):
        # Base checkpoints carry no task head, so their keys have no "vit." prefix.
        prefix = "" if base_model else "vit."
        hidden = config.hidden_size
        # timm stores q, k and v as one fused (3*hidden, hidden) matrix + bias.
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # Query is the first third, key the middle third, value the last third.
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[:hidden, :]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[:hidden]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[hidden : hidden * 2, :]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[hidden : hidden * 2]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[-hidden:, :]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-hidden:]
def a__(state_dict):
    """Drop the classification-head weights from a timm checkpoint, in place.

    The original bound the key list to a throwaway ``_snake_case`` local and then
    iterated the undefined name ``ignore_keys``; this restores the intended loop.
    Missing keys are ignored so the call is safe on headless checkpoints.
    """
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        # Default of None makes the pop a no-op when the key is absent.
        state_dict.pop(k, None)
def a__(dct, old, new):
    """Rename key ``old`` to ``new`` in ``dct``, in place.

    The original declared all three parameters as ``a`` (a SyntaxError) and read
    the undefined names ``dct``/``val``; this restores the intended pop-and-reinsert.
    """
    val = dct.pop(old)
    dct[new] = val
def a__():
    """Download the standard COCO cats test image and return it as a PIL image.

    The original bound the URL to a throwaway ``_snake_case`` local and then read
    the undefined name ``a`` (this function takes no arguments, and ``stream=a``
    was plainly ``stream=True``); restored here. Pure network I/O — nothing to
    assert offline.
    """
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
# NOTE(review): the three parameters are all named `a` (duplicate parameter names are a
# SyntaxError) and every intermediate result below is bound to the same throwaway
# `_snake_case` local, so names read later — `timm_model`, `base_model`, `rename_keys`,
# `idalabel`, `vit_name`, `model`, `transform`, `timm_transforms`, `pillow_resamplings`,
# `processor`, `outputs`, `logits`, `timm_pooled_output`, `timm_logits`,
# `pytorch_dump_folder_path`, `push_to_hub` — are undefined. The comments record the
# intended flow; the code cannot run in this obfuscated form.
def a__ ( a : Any , a : Any , a : Optional[int]=False ):
    """Convert a timm ViT-hybrid checkpoint into a HuggingFace ViTHybrid checkpoint,
    verify it against the timm reference model, and optionally save and/or push it.

    Expected positional arguments: vit_name, pytorch_dump_folder_path, push_to_hub.
    """
    # Backbone: BiT (ResNetv2) with 3 stages feeding the ViT encoder at 384x384, 1000 labels.
    _snake_case : Optional[int] = BitConfig(
        global_padding="same" , layer_type="bottleneck" , depths=(3, 4, 9) , out_features=["stage3"] , embedding_dynamic_padding=a , )
    _snake_case : int = ViTHybridConfig(backbone_config=a , image_size=384 , num_labels=1_000 )
    _snake_case : int = False
    # load original model from timm
    _snake_case : Optional[int] = timm.create_model(a , pretrained=a )
    timm_model.eval()
    # load state_dict of original model, remove and rename some keys
    _snake_case : Any = timm_model.state_dict()
    if base_model:
        remove_classification_head_(a )
    _snake_case : List[Any] = create_rename_keys(a , a )
    for src, dest in rename_keys:
        rename_key(a , a , a )
    read_in_q_k_v(a , a , a )
    # Map ImageNet-1k class ids to human-readable labels for the model config.
    _snake_case : str = "huggingface/label-files"
    _snake_case : str = "imagenet-1k-id2label.json"
    _snake_case : str = json.load(open(hf_hub_download(a , a , repo_type="dataset" ) , "r" ) )
    _snake_case : Optional[int] = {int(a ): v for k, v in idalabel.items()}
    _snake_case : Dict = idalabel
    _snake_case : int = {v: k for k, v in idalabel.items()}
    # load HuggingFace model
    if vit_name[-5:] == "in21k":
        _snake_case : int = ViTHybridModel(a ).eval()
    else:
        _snake_case : Any = ViTHybridForImageClassification(a ).eval()
    model.load_state_dict(a )
    # create image processor
    _snake_case : Dict = create_transform(**resolve_data_config({} , model=a ) )
    _snake_case : List[Any] = transform.transforms
    _snake_case : List[str] = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }
    # Mirror the timm eval transform (resize -> center crop -> normalize) in the HF processor.
    _snake_case : Tuple = ViTHybridImageProcessor(
        do_resize=a , size={"shortest_edge": timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=a , crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]} , do_normalize=a , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , )
    _snake_case : Any = prepare_img()
    _snake_case : Tuple = transform(a ).unsqueeze(0 )
    _snake_case : List[Any] = processor(a , return_tensors="pt" ).pixel_values
    # verify pixel values
    assert torch.allclose(a , a )
    # verify logits
    with torch.no_grad():
        _snake_case : Optional[int] = model(a )
    _snake_case : Any = outputs.logits
    print("Predicted class:" , logits.argmax(-1 ).item() )
    if base_model:
        # Headless model: compare pooled features against the timm backbone output.
        _snake_case : List[Any] = timm_model.forward_features(a )
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(a , outputs.pooler_output , atol=1e-3 )
    else:
        # Classifier model: compare logits against the full timm forward pass.
        _snake_case : Optional[Any] = timm_model(a )
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(a , outputs.logits , atol=1e-3 )
    print("Looks ok!" )
    if pytorch_dump_folder_path is not None:
        Path(a ).mkdir(exist_ok=a )
        print(f'Saving model {vit_name} to {pytorch_dump_folder_path}' )
        model.save_pretrained(a )
        print(f'Saving processor to {pytorch_dump_folder_path}' )
        processor.save_pretrained(a )
    if push_to_hub:
        print(f'Pushing model and processor to the hub {vit_name}' )
        model.push_to_hub(f'ybelkada/{vit_name}' )
        processor.push_to_hub(f'ybelkada/{vit_name}' )
if __name__ == "__main__":
    # Command-line entry point: parse arguments, then run the conversion above.
    _a : str = argparse.ArgumentParser()
    # NOTE(review): the parser is bound to `_a`, so `parser`, `args` and
    # `convert_vit_checkpoint` below are undefined — obfuscation artifacts of this
    # source (the conversion function above was renamed `a__`).
    # Required parameters
    parser.add_argument(
        """--vit_name""",
        default="""vit_base_r50_s16_384""",
        type=str,
        help="""Name of the hybrid ViT timm model you'd like to convert.""",
    )
    parser.add_argument(
        """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
    )
    parser.add_argument(
        """--push_to_hub""", action="""store_true""", help="""Whether to upload the model to the HuggingFace hub."""
    )
    _a : Tuple = parser.parse_args()
    convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 87 |
"""simple docstring"""
from __future__ import annotations
import requests
_a : List[str] = set(
"""approved_at_utc approved_by author_flair_background_color
author_flair_css_class author_flair_richtext author_flair_template_id author_fullname
author_premium can_mod_post category clicked content_categories created_utc downs
edited gilded gildings hidden hide_score is_created_from_ads_ui is_meta
is_original_content is_reddit_media_domain is_video link_flair_css_class
link_flair_richtext link_flair_text link_flair_text_color media_embed mod_reason_title
name permalink pwls quarantine saved score secure_media secure_media_embed selftext
subreddit subreddit_name_prefixed subreddit_type thumbnail title top_awarded_type
total_awards_received ups upvote_ratio url user_reports""".split()
)
def a__(subreddit, limit=1, age="new", wanted_data=None):
    """Fetch post data for a subreddit from the public reddit JSON endpoint.

    Restores the broken original, whose parameters were all named ``a`` (a
    SyntaxError) and whose result dict was bound to a throwaway ``_snake_case``
    local while the code returned the undefined name ``data_dict``.

    Args:
        subreddit: subreddit name, e.g. ``"learnpython"``.
        limit: number of posts to fetch.
        age: listing to query ("new", "top", "hot", ...).
        wanted_data: optional list of per-post fields to keep; every entry must be
            in the module-level ``valid_terms`` set. When empty, the raw child
            objects are returned instead.

    Raises:
        ValueError: if ``wanted_data`` contains an unknown field.
        requests.HTTPError: when reddit rate-limits the request (HTTP 429).
    """
    wanted_data = wanted_data or []
    # Reject unknown field names up front with a helpful message.
    if invalid_search_terms := ", ".join(sorted(set(wanted_data) - valid_terms)):
        msg = f"Invalid search term: {invalid_search_terms}"
        raise ValueError(msg)
    response = requests.get(
        f"https://reddit.com/r/{subreddit}/{age}.json?limit={limit}",
        headers={"User-agent": "A random string"},
    )
    if response.status_code == 429:
        raise requests.HTTPError
    data = response.json()
    if not wanted_data:
        # No field filter requested: return the raw reddit child objects.
        return {id_: data["data"]["children"][id_] for id_ in range(limit)}
    data_dict = {}
    for id_ in range(limit):
        # Keep only the requested fields for each post.
        data_dict[id_] = {
            item: data["data"]["children"][id_]["data"][item] for item in wanted_data
        }
    return data_dict
if __name__ == "__main__":
    # If you get Error 429, that means you are rate limited.Try after some time
    # NOTE(review): `get_subreddit_data` is not defined under that name here — the
    # fetch function above was renamed `a__` by the obfuscation.
    print(get_subreddit_data("""learnpython""", wanted_data=["""title""", """url""", """selftext"""]))
| 87 | 1 |
"""simple docstring"""
from argparse import ArgumentParser, Namespace
from ..utils import logging
from . import BaseTransformersCLICommand
def a__(args: Namespace):
    """Factory for the ``transformers-cli convert`` sub-command.

    The original bound the parsed namespace to a parameter named ``a`` but read
    the undefined name ``args``; the parameter is renamed to match the body.

    Returns:
        The ConvertCommand built from the parsed CLI arguments.
    """
    return ConvertCommand(
        args.model_type, args.tf_checkpoint, args.pytorch_dump_output, args.config, args.finetuning_task_name
    )
_a : int = """
transformers can only be used from the commandline to convert TensorFlow models in PyTorch, In that case, it requires
TensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.
"""
class _UpperCAmelCase ( _snake_case):
    """``transformers-cli convert``: convert an original (mostly TensorFlow)
    checkpoint of a supported architecture into a Transformers PyTorch checkpoint.

    Restored from the obfuscated original: sub-parser, logger and constructor
    arguments were assigned to throwaway ``_snake_case`` locals while the code read
    undefined names (``parser``, ``train_parser``, ``self._model_type``, ...), and
    ``raise ImportError(snake_case_)`` referenced a parameter instead of the
    module-level TensorFlow error message ``_a``.

    NOTE(review): both methods below share the obfuscated name ``lowerCamelCase__``;
    the second definition shadows the first on the class. They should be renamed
    (``register_subcommand`` / ``run``) once callers are updated.
    """

    @staticmethod
    def lowerCamelCase__ ( snake_case_ ):
        """Register the ``convert`` sub-command on ``snake_case_`` (the root parser)."""
        train_parser = snake_case_.add_parser(
            "convert",
            help="CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints.",
        )
        train_parser.add_argument("--model_type", type=str, required=True, help="Model's type.")
        train_parser.add_argument(
            "--tf_checkpoint", type=str, required=True, help="TensorFlow checkpoint path or folder."
        )
        train_parser.add_argument(
            "--pytorch_dump_output", type=str, required=True, help="Path to the PyTorch saved model output."
        )
        train_parser.add_argument("--config", type=str, default="", help="Configuration file path or folder.")
        train_parser.add_argument(
            "--finetuning_task_name",
            type=str,
            default=None,
            help="Optional fine-tuning task name if the TF model was a finetuned model.",
        )
        # a__ is the module-level factory that builds this command from parsed args.
        train_parser.set_defaults(func=a__)

    def __init__( self , model_type , tf_checkpoint , pytorch_dump_output , config , finetuning_task_name , *args ):
        self._logger = logging.get_logger("transformers-cli/converting")
        self._logger.info(F'Loading model {model_type}')
        self._model_type = model_type
        self._tf_checkpoint = tf_checkpoint
        self._pytorch_dump_output = pytorch_dump_output
        self._config = config
        self._finetuning_task_name = finetuning_task_name

    def lowerCamelCase__ ( self ):
        """Dispatch to the architecture-specific conversion script based on --model_type."""
        if self._model_type == "albert":
            try:
                from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import (
                    convert_tf_checkpoint_to_pytorch,
                )
            except ImportError:
                # _a is the module-level message explaining that TensorFlow is required.
                raise ImportError(_a)
            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "bert":
            try:
                from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import (
                    convert_tf_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(_a)
            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "funnel":
            try:
                from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import (
                    convert_tf_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(_a)
            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "t5":
            try:
                from ..models.ta.convert_ta_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
            except ImportError:
                raise ImportError(_a)
            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "gpt":
            from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import (
                convert_openai_checkpoint_to_pytorch,
            )

            convert_openai_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "transfo_xl":
            try:
                from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import (
                    convert_transfo_xl_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(_a)
            # A *.ckpt path is a TF checkpoint; anything else is treated as a dataset file.
            if "ckpt" in self._tf_checkpoint.lower():
                tf_checkpoint = self._tf_checkpoint
                tf_dataset_file = ""
            else:
                tf_dataset_file = self._tf_checkpoint
                tf_checkpoint = ""
            convert_transfo_xl_checkpoint_to_pytorch(
                tf_checkpoint, self._config, self._pytorch_dump_output, tf_dataset_file
            )
        elif self._model_type == "gpt2":
            try:
                from ..models.gpta.convert_gpta_original_tf_checkpoint_to_pytorch import (
                    convert_gpta_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(_a)
            convert_gpta_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "xlnet":
            try:
                from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import (
                    convert_xlnet_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(_a)
            convert_xlnet_checkpoint_to_pytorch(
                self._tf_checkpoint, self._config, self._pytorch_dump_output, self._finetuning_task_name
            )
        elif self._model_type == "xlm":
            from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import (
                convert_xlm_checkpoint_to_pytorch,
            )

            convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint, self._pytorch_dump_output)
        elif self._model_type == "lxmert":
            from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import (
                convert_lxmert_checkpoint_to_pytorch,
            )

            convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint, self._pytorch_dump_output)
        elif self._model_type == "rembert":
            from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import (
                convert_rembert_tf_checkpoint_to_pytorch,
            )

            convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        else:
            raise ValueError(
                "--model_type should be selected in the list [bert, gpt, gpt2, t5, transfo_xl, xlnet, xlm, lxmert]" )
| 87 |
"""simple docstring"""
from __future__ import annotations
from numpy import array, cos, cross, floataa, radians, sin
from numpy.typing import NDArray
def a__(magnitude, angle, radian_mode=False):
    """Resolve a polar force (magnitude, angle) into Cartesian ``[x, y]`` components.

    The original declared all three parameters as ``a`` (a SyntaxError) while the
    body read ``magnitude``/``radian_mode``; restored here. ``angle`` is
    interpreted as radians when ``radian_mode`` is true, degrees otherwise.
    """
    if radian_mode:
        return [magnitude * cos(angle), magnitude * sin(angle)]
    # Degree input: convert to radians before taking cos/sin.
    return [magnitude * cos(radians(angle)), magnitude * sin(radians(angle))]
def a__(forces, location, eps=10**-1):
    """Check whether a set of 2D forces applied at given points is in rotational
    static equilibrium (net moment about the origin approximately zero).

    The original declared all three parameters as ``a`` (a SyntaxError) and read
    undefined intermediate names; restored here.

    Args:
        forces: (n, 2) array of force vectors.
        location: (n, 2) array of application points.
        eps: tolerance on the summed moment.
    """
    # Moment of each force about the origin: z-component of r x F in 2D.
    moments = cross(location, forces)
    sum_moments = sum(moments)
    return abs(sum_moments) < eps
if __name__ == "__main__":
    # Test to check if it works
    # NOTE(review): each array is bound to `_a` and the helpers above were renamed
    # `a__`, so `polar_force`, `in_static_equilibrium`, `forces` and `location`
    # below are undefined — obfuscation artifacts of this source.
    _a : Tuple = array(
        [
            polar_force(718.4, 180 - 30),
            polar_force(879.54, 45),
            polar_force(100, -90),
        ]
    )
    _a : NDArray[floataa] = array([[0, 0], [0, 0], [0, 0]])
    assert in_static_equilibrium(forces, location)
    # Problem 1 in image_data/2D_problems.jpg
    _a : List[Any] = array(
        [
            polar_force(30 * 9.81, 15),
            polar_force(215, 180 - 45),
            polar_force(264, 90 - 30),
        ]
    )
    _a : List[Any] = array([[0, 0], [0, 0], [0, 0]])
    assert in_static_equilibrium(forces, location)
    # Problem in image_data/2D_problems_1.jpg
    _a : List[str] = array([[0, -2_000], [0, -1_200], [0, 15_600], [0, -12_400]])
    _a : Optional[int] = array([[0, 0], [6, 0], [10, 0], [12, 0]])
    assert in_static_equilibrium(forces, location)
    # Run the module's doctests as a final sanity check.
    import doctest

    doctest.testmod()
| 87 | 1 |
"""simple docstring"""
_a : Optional[int] = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""
_a : Union[str, Any] = [{"""type""": """code""", """content""": INSTALL_CONTENT}]
_a : List[str] = {
"""{processor_class}""": """FakeProcessorClass""",
"""{model_class}""": """FakeModelClass""",
"""{object_class}""": """FakeObjectClass""",
}
| 87 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_a : Optional[int] = logging.get_logger(__name__)
_a : str = {"""openai-gpt""": """https://huggingface.co/openai-gpt/resolve/main/config.json"""}
class _UpperCAmelCase ( _snake_case):
    """Configuration for the original OpenAI GPT model.

    Restores the intended bindings lost in the obfuscated original: both class
    attributes were named ``__lowercase`` (clearly ``model_type`` and
    ``attribute_map``), every ``__init__`` parameter was declared ``snake_case_``
    (duplicate names are a SyntaxError), and each value was assigned to a
    throwaway ``_snake_case`` local instead of ``self``.
    """

    model_type = "openai-gpt"
    # Canonical-name aliases expected by the shared config machinery.
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=40_478,
        n_positions=512,
        n_embd=768,
        n_layer=12,
        n_head=12,
        afn="gelu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        summary_type="cls_index",
        summary_use_proj=True,
        summary_activation=None,
        summary_proj_to_labels=True,
        summary_first_dropout=0.1,
        **kwargs,
    ):
        """Defaults match the published openai-gpt checkpoint."""
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.afn = afn
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_first_dropout = summary_first_dropout
        self.summary_proj_to_labels = summary_proj_to_labels
        super().__init__(**kwargs)
| 87 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_a : int = logging.get_logger(__name__)
_a : str = {
"""vinvino02/glpn-kitti""": """https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json""",
# See all GLPN models at https://huggingface.co/models?filter=glpn
}
class _UpperCAmelCase ( _snake_case):
    """GLPN (Global-Local Path Networks, monocular depth estimation) configuration.

    Restores the intended bindings lost in the obfuscated original: the class
    attribute ``__lowercase`` was plainly ``model_type``, every ``__init__``
    parameter was declared ``snake_case_`` (duplicate names are a SyntaxError),
    and each value was assigned to a throwaway ``_snake_case`` local instead of
    ``self``.
    """

    model_type = "glpn"

    def __init__(
        self,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[32, 64, 160, 256],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        num_attention_heads=[1, 2, 5, 8],
        mlp_ratios=[4, 4, 4, 4],
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        drop_path_rate=0.1,
        layer_norm_eps=1e-6,
        decoder_hidden_size=64,
        max_depth=10,
        head_in_index=-1,
        **kwargs,
    ):
        super().__init__(**kwargs)
        # NOTE: the list defaults are shared objects (standard Python semantics);
        # callers must not mutate the config's list attributes in place.
        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.max_depth = max_depth
        self.head_in_index = head_in_index
| 87 |
"""simple docstring"""
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision.transforms import functional as F
from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection
from transformers.utils import logging
logging.set_verbosity_info()
_a : Tuple = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
# NOTE(review): the list is bound to `_a`, but every statement below appends to
# `rename_keys` — an obfuscation artifact; the two names must match for this
# module-level key-mapping table (original key -> HF key) to be built.
_a : int = []
for i in range(6):
    # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
    rename_keys.append(
        (f'transformer.encoder.layers.{i}.self_attn.out_proj.weight', f'encoder.layers.{i}.self_attn.out_proj.weight')
    )
    rename_keys.append(
        (f'transformer.encoder.layers.{i}.self_attn.out_proj.bias', f'encoder.layers.{i}.self_attn.out_proj.bias')
    )
    rename_keys.append((f'transformer.encoder.layers.{i}.linear1.weight', f'encoder.layers.{i}.fc1.weight'))
    rename_keys.append((f'transformer.encoder.layers.{i}.linear1.bias', f'encoder.layers.{i}.fc1.bias'))
    rename_keys.append((f'transformer.encoder.layers.{i}.linear2.weight', f'encoder.layers.{i}.fc2.weight'))
    rename_keys.append((f'transformer.encoder.layers.{i}.linear2.bias', f'encoder.layers.{i}.fc2.bias'))
    rename_keys.append(
        (f'transformer.encoder.layers.{i}.norm1.weight', f'encoder.layers.{i}.self_attn_layer_norm.weight')
    )
    rename_keys.append((f'transformer.encoder.layers.{i}.norm1.bias', f'encoder.layers.{i}.self_attn_layer_norm.bias'))
    rename_keys.append((f'transformer.encoder.layers.{i}.norm2.weight', f'encoder.layers.{i}.final_layer_norm.weight'))
    rename_keys.append((f'transformer.encoder.layers.{i}.norm2.bias', f'encoder.layers.{i}.final_layer_norm.bias'))
    # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
    rename_keys.append(
        (f'transformer.decoder.layers.{i}.self_attn.out_proj.weight', f'decoder.layers.{i}.self_attn.out_proj.weight')
    )
    rename_keys.append(
        (f'transformer.decoder.layers.{i}.self_attn.out_proj.bias', f'decoder.layers.{i}.self_attn.out_proj.bias')
    )
    rename_keys.append(
        (
            f'transformer.decoder.layers.{i}.multihead_attn.out_proj.weight',
            f'decoder.layers.{i}.encoder_attn.out_proj.weight',
        )
    )
    rename_keys.append(
        (
            f'transformer.decoder.layers.{i}.multihead_attn.out_proj.bias',
            f'decoder.layers.{i}.encoder_attn.out_proj.bias',
        )
    )
    rename_keys.append((f'transformer.decoder.layers.{i}.linear1.weight', f'decoder.layers.{i}.fc1.weight'))
    rename_keys.append((f'transformer.decoder.layers.{i}.linear1.bias', f'decoder.layers.{i}.fc1.bias'))
    rename_keys.append((f'transformer.decoder.layers.{i}.linear2.weight', f'decoder.layers.{i}.fc2.weight'))
    rename_keys.append((f'transformer.decoder.layers.{i}.linear2.bias', f'decoder.layers.{i}.fc2.bias'))
    rename_keys.append(
        (f'transformer.decoder.layers.{i}.norm1.weight', f'decoder.layers.{i}.self_attn_layer_norm.weight')
    )
    rename_keys.append((f'transformer.decoder.layers.{i}.norm1.bias', f'decoder.layers.{i}.self_attn_layer_norm.bias'))
    rename_keys.append(
        (f'transformer.decoder.layers.{i}.norm2.weight', f'decoder.layers.{i}.encoder_attn_layer_norm.weight')
    )
    rename_keys.append(
        (f'transformer.decoder.layers.{i}.norm2.bias', f'decoder.layers.{i}.encoder_attn_layer_norm.bias')
    )
    rename_keys.append((f'transformer.decoder.layers.{i}.norm3.weight', f'decoder.layers.{i}.final_layer_norm.weight'))
    rename_keys.append((f'transformer.decoder.layers.{i}.norm3.bias', f'decoder.layers.{i}.final_layer_norm.bias'))
# convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads
rename_keys.extend(
    [
        ("""input_proj.weight""", """input_projection.weight"""),
        ("""input_proj.bias""", """input_projection.bias"""),
        ("""query_embed.weight""", """query_position_embeddings.weight"""),
        ("""transformer.encoder.norm.weight""", """encoder.layernorm.weight"""),
        ("""transformer.encoder.norm.bias""", """encoder.layernorm.bias"""),
        ("""transformer.decoder.norm.weight""", """decoder.layernorm.weight"""),
        ("""transformer.decoder.norm.bias""", """decoder.layernorm.bias"""),
        ("""class_embed.weight""", """class_labels_classifier.weight"""),
        ("""class_embed.bias""", """class_labels_classifier.bias"""),
        ("""bbox_embed.layers.0.weight""", """bbox_predictor.layers.0.weight"""),
        ("""bbox_embed.layers.0.bias""", """bbox_predictor.layers.0.bias"""),
        ("""bbox_embed.layers.1.weight""", """bbox_predictor.layers.1.weight"""),
        ("""bbox_embed.layers.1.bias""", """bbox_predictor.layers.1.bias"""),
        ("""bbox_embed.layers.2.weight""", """bbox_predictor.layers.2.weight"""),
        ("""bbox_embed.layers.2.bias""", """bbox_predictor.layers.2.bias"""),
    ]
)
def a__(state_dict, old, new):
    """Rename key ``old`` to ``new`` in ``state_dict``, in place.

    The original declared all three parameters as ``a`` (a SyntaxError) and read
    undefined names; this restores the intended pop-and-reinsert.
    """
    val = state_dict.pop(old)
    state_dict[new] = val
def a__(state_dict):
    """Return a copy of ``state_dict`` with backbone keys remapped to the HF layout.

    Keys containing ``backbone.0.body`` (torchvision backbone wrapper) are renamed
    to ``backbone.conv_encoder.model``; all other keys pass through unchanged,
    preserving order. The original read the undefined names
    ``state_dict``/``new_state_dict`` (its locals were bound to ``_snake_case``);
    restored here.
    """
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict
def a__(state_dict):
    """Split the fused in-projection of every attention layer into q/k/v entries.

    DETR-style checkpoints store each attention's query/key/value projection as one
    fused (768, d) weight and (768,) bias; the HF model expects separate
    ``q_proj``/``k_proj``/``v_proj`` tensors of 256 rows each (hidden size 256).
    Operates on ``state_dict`` in place. The original assigned every slice to a
    throwaway ``_snake_case`` local; this restores the intended writes.
    """
    prefix = ""
    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # query, key and value occupy consecutive 256-row slices of the fused tensor
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6):
        # self-attention in-projection
        in_proj_weight = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
        # cross-attention in-projection
        in_proj_weight_cross_attn = state_dict.pop(
            f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight"
        )
        in_proj_bias_cross_attn = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias")
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.weight"] = in_proj_weight_cross_attn[:256, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.bias"] = in_proj_bias_cross_attn[:256]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.weight"] = in_proj_weight_cross_attn[256:512, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.bias"] = in_proj_bias_cross_attn[256:512]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.weight"] = in_proj_weight_cross_attn[-256:, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.bias"] = in_proj_bias_cross_attn[-256:]
def a__(image, checkpoint_url):
    """Resize ``image`` so its longest side matches the target size for this model.

    Detection checkpoints expect a max side of 800 px, structure-recognition
    checkpoints 1000 px; the aspect ratio is preserved. The original declared both
    parameters as ``a`` (a SyntaxError) and read undefined names; restored here.
    """
    width, height = image.size
    current_max_size = max(width, height)
    target_max_size = 800 if "detection" in checkpoint_url else 1_000
    scale = target_max_size / current_max_size
    resized_image = image.resize((int(round(scale * width)), int(round(scale * height))))
    return resized_image
def a__(image):
    """Convert a PIL image to a tensor and normalize with ImageNet mean/std.

    The original bound both intermediate results to throwaway ``_snake_case``
    locals and therefore normalized/returned the wrong object; this restores the
    intended to_tensor -> normalize pipeline.
    """
    image = F.to_tensor(image)
    image = F.normalize(image, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    return image
@torch.no_grad()
# NOTE(review): the three parameters are all named `a` (duplicate parameter names are a
# SyntaxError) and every intermediate below is bound to the throwaway `_snake_case`
# local, so names read later — `state_dict`, `checkpoint_url`, `val`, `idalabel`,
# `model`, `image_processor`, `outputs`, `expected_shape`, `pytorch_dump_folder_path`,
# `push_to_hub` — are undefined. Comments record the intended flow; the code cannot
# run in this obfuscated form.
def a__ ( a : Optional[Any] , a : Any , a : Union[str, Any] ):
    """Convert a Table Transformer (DETR-based) checkpoint from a download URL into a
    HuggingFace TableTransformerForObjectDetection checkpoint, verify predictions on
    a reference image, and optionally save and/or push the result.

    Expected positional arguments: checkpoint_url, pytorch_dump_folder_path, push_to_hub.
    """
    logger.info("Converting model..." )
    # load original state dict
    _snake_case : Tuple = torch.hub.load_state_dict_from_url(a , map_location="cpu" )
    # rename keys
    for src, dest in rename_keys:
        rename_key(a , a , a )
    _snake_case : Union[str, Any] = rename_backbone_keys(a )
    # query, key and value matrices need special treatment
    read_in_q_k_v(a )
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    _snake_case : int = "model."
    for key in state_dict.copy().keys():
        if not key.startswith("class_labels_classifier" ) and not key.startswith("bbox_predictor" ):
            _snake_case : Optional[int] = state_dict.pop(a )
            _snake_case : Any = val
    # create HuggingFace model and load state dict
    _snake_case : Tuple = TableTransformerConfig(
        backbone="resnet18" , mask_loss_coefficient=1 , dice_loss_coefficient=1 , ce_loss_coefficient=1 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , eos_coefficient=0.4 , class_cost=1 , bbox_cost=5 , giou_cost=2 , )
    # Detection and structure-recognition checkpoints differ in query count and labels.
    if "detection" in checkpoint_url:
        _snake_case : Any = 15
        _snake_case : int = 2
        _snake_case : Optional[Any] = {0: "table", 1: "table rotated"}
        _snake_case : Union[str, Any] = idalabel
        _snake_case : Union[str, Any] = {v: k for k, v in idalabel.items()}
    else:
        _snake_case : Any = 125
        _snake_case : Union[str, Any] = 6
        _snake_case : List[str] = {
            0: "table",
            1: "table column",
            2: "table row",
            3: "table column header",
            4: "table projected row header",
            5: "table spanning cell",
        }
        _snake_case : Any = idalabel
        _snake_case : Optional[int] = {v: k for k, v in idalabel.items()}
    _snake_case : Union[str, Any] = DetrImageProcessor(
        format="coco_detection" , max_size=800 if "detection" in checkpoint_url else 1_000 )
    _snake_case : str = TableTransformerForObjectDetection(a )
    model.load_state_dict(a )
    model.eval()
    # verify our conversion
    _snake_case : Optional[int] = "example_pdf.png" if "detection" in checkpoint_url else "example_table.png"
    _snake_case : Optional[Any] = hf_hub_download(repo_id="nielsr/example-pdf" , repo_type="dataset" , filename=a )
    _snake_case : Dict = Image.open(a ).convert("RGB" )
    _snake_case : Union[str, Any] = normalize(resize(a , a ) ).unsqueeze(0 )
    _snake_case : str = model(a )
    # Hard-coded reference logits/boxes from the original checkpoints.
    if "detection" in checkpoint_url:
        _snake_case : int = (1, 15, 3)
        _snake_case : List[str] = torch.tensor(
            [[-6.7897, -16.9985, 6.7937], [-8.0186, -22.2192, 6.9677], [-7.3117, -21.0708, 7.4055]] )
        _snake_case : List[str] = torch.tensor([[0.4867, 0.1767, 0.6732], [0.6718, 0.4479, 0.3830], [0.4716, 0.1760, 0.6364]] )
    else:
        _snake_case : Union[str, Any] = (1, 125, 7)
        _snake_case : str = torch.tensor(
            [[-18.1430, -8.3214, 4.8274], [-18.4685, -7.1361, -4.2667], [-26.3693, -9.3429, -4.9962]] )
        _snake_case : Optional[Any] = torch.tensor([[0.4983, 0.5595, 0.9440], [0.4916, 0.6315, 0.5954], [0.6108, 0.8637, 0.1135]] )
    assert outputs.logits.shape == expected_shape
    assert torch.allclose(outputs.logits[0, :3, :3] , a , atol=1e-4 )
    assert torch.allclose(outputs.pred_boxes[0, :3, :3] , a , atol=1e-4 )
    print("Looks ok!" )
    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(f'Saving PyTorch model and image processor to {pytorch_dump_folder_path}...' )
        Path(a ).mkdir(exist_ok=a )
        model.save_pretrained(a )
        image_processor.save_pretrained(a )
    if push_to_hub:
        # Push model to HF hub
        logger.info("Pushing model to the hub..." )
        _snake_case : int = (
            "microsoft/table-transformer-detection"
            if "detection" in checkpoint_url
            else "microsoft/table-transformer-structure-recognition"
        )
        model.push_to_hub(a )
        image_processor.push_to_hub(a )
if __name__ == "__main__":
    # Command-line entry point: parse arguments, then run the conversion above.
    _a : Tuple = argparse.ArgumentParser()
    # NOTE(review): the parser is bound to `_a`, so `parser`, `args` and
    # `convert_table_transformer_checkpoint` below are undefined — obfuscation
    # artifacts of this source (the conversion function above was renamed `a__`).
    parser.add_argument(
        """--checkpoint_url""",
        default="""https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth""",
        type=str,
        choices=[
            """https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth""",
            """https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth""",
        ],
        help="""URL of the Table Transformer checkpoint you'd like to convert.""",
    )
    parser.add_argument(
        """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
    )
    parser.add_argument(
        """--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
    )
    _a : Any = parser.parse_args()
    convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 87 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_a : Dict = logging.get_logger(__name__)
_a : int = {
"""andreasmadsen/efficient_mlm_m0.40""": (
"""https://huggingface.co/andreasmadsen/efficient_mlm_m0.40/resolve/main/config.json"""
),
}
class _UpperCAmelCase ( _snake_case):
    """RoBERTa-PreLayerNorm model configuration.

    Restores the intended bindings lost in the obfuscated original: the class
    attribute ``__lowercase`` was plainly ``model_type``, every ``__init__``
    parameter was declared ``snake_case_`` (duplicate names are a SyntaxError),
    and each value was assigned to a throwaway ``_snake_case`` local instead of
    ``self``.
    """

    model_type = "roberta-prelayernorm"

    def __init__(
        self,
        vocab_size=50_265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        # Special-token ids are forwarded to the shared config base class.
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class _UpperCAmelCase ( _snake_case):
    """ONNX export configuration: declares the dynamic axes of the model inputs.

    The original bound the axes mapping to a throwaway ``_snake_case`` local and
    then read the undefined name ``dynamic_axis``; restored here.
    """

    @property
    def lowerCamelCase__ ( self ):
        # Multiple-choice inputs carry an extra "choice" axis between batch and sequence.
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
| 87 |
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(""">=""", """4.25.0""")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
| 87 | 1 |
"""simple docstring"""
_a : Optional[int] = {
"""Pillow""": """Pillow<10.0.0""",
"""accelerate""": """accelerate>=0.20.3""",
"""av""": """av==9.2.0""",
"""beautifulsoup4""": """beautifulsoup4""",
"""black""": """black~=23.1""",
"""codecarbon""": """codecarbon==1.2.0""",
"""cookiecutter""": """cookiecutter==1.7.3""",
"""dataclasses""": """dataclasses""",
"""datasets""": """datasets!=2.5.0""",
"""decord""": """decord==0.6.0""",
"""deepspeed""": """deepspeed>=0.9.3""",
"""diffusers""": """diffusers""",
"""dill""": """dill<0.3.5""",
"""evaluate""": """evaluate>=0.2.0""",
"""fairscale""": """fairscale>0.3""",
"""faiss-cpu""": """faiss-cpu""",
"""fastapi""": """fastapi""",
"""filelock""": """filelock""",
"""flax""": """flax>=0.4.1,<=0.7.0""",
"""ftfy""": """ftfy""",
"""fugashi""": """fugashi>=1.0""",
"""GitPython""": """GitPython<3.1.19""",
"""hf-doc-builder""": """hf-doc-builder>=0.3.0""",
"""huggingface-hub""": """huggingface-hub>=0.14.1,<1.0""",
"""importlib_metadata""": """importlib_metadata""",
"""ipadic""": """ipadic>=1.0.0,<2.0""",
"""isort""": """isort>=5.5.4""",
"""jax""": """jax>=0.2.8,!=0.3.2,<=0.4.13""",
"""jaxlib""": """jaxlib>=0.1.65,<=0.4.13""",
"""jieba""": """jieba""",
"""kenlm""": """kenlm""",
"""keras-nlp""": """keras-nlp>=0.3.1""",
"""librosa""": """librosa""",
"""nltk""": """nltk""",
"""natten""": """natten>=0.14.6""",
"""numpy""": """numpy>=1.17""",
"""onnxconverter-common""": """onnxconverter-common""",
"""onnxruntime-tools""": """onnxruntime-tools>=1.4.2""",
"""onnxruntime""": """onnxruntime>=1.4.0""",
"""opencv-python""": """opencv-python""",
"""optuna""": """optuna""",
"""optax""": """optax>=0.0.8,<=0.1.4""",
"""packaging""": """packaging>=20.0""",
"""parameterized""": """parameterized""",
"""phonemizer""": """phonemizer""",
"""protobuf""": """protobuf""",
"""psutil""": """psutil""",
"""pyyaml""": """pyyaml>=5.1""",
"""pydantic""": """pydantic<2""",
"""pytest""": """pytest>=7.2.0""",
"""pytest-timeout""": """pytest-timeout""",
"""pytest-xdist""": """pytest-xdist""",
"""python""": """python>=3.8.0""",
"""ray[tune]""": """ray[tune]""",
"""regex""": """regex!=2019.12.17""",
"""requests""": """requests""",
"""rhoknp""": """rhoknp>=1.1.0,<1.3.1""",
"""rjieba""": """rjieba""",
"""rouge-score""": """rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1""",
"""ruff""": """ruff>=0.0.241,<=0.0.259""",
"""sacrebleu""": """sacrebleu>=1.4.12,<2.0.0""",
"""sacremoses""": """sacremoses""",
"""safetensors""": """safetensors>=0.3.1""",
"""sagemaker""": """sagemaker>=2.31.0""",
"""scikit-learn""": """scikit-learn""",
"""sentencepiece""": """sentencepiece>=0.1.91,!=0.1.92""",
"""sigopt""": """sigopt""",
"""starlette""": """starlette""",
"""sudachipy""": """sudachipy>=0.6.6""",
"""sudachidict_core""": """sudachidict_core>=20220729""",
"""tensorflow-cpu""": """tensorflow-cpu>=2.6,<2.14""",
"""tensorflow""": """tensorflow>=2.6,<2.14""",
"""tensorflow-text""": """tensorflow-text<2.14""",
"""tf2onnx""": """tf2onnx""",
"""timeout-decorator""": """timeout-decorator""",
"""timm""": """timm""",
"""tokenizers""": """tokenizers>=0.11.1,!=0.11.3,<0.14""",
"""torch""": """torch>=1.9,!=1.12.0""",
"""torchaudio""": """torchaudio""",
"""torchvision""": """torchvision""",
"""pyctcdecode""": """pyctcdecode>=0.4.0""",
"""tqdm""": """tqdm>=4.27""",
"""unidic""": """unidic>=1.0.2""",
"""unidic_lite""": """unidic_lite>=1.0.7""",
"""urllib3""": """urllib3<2.0.0""",
"""uvicorn""": """uvicorn""",
}
| 87 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
# Module-level logger for this configuration module.
_a : Optional[int] = logging.get_logger(__name__)
# Map of pretrained checkpoint identifiers -> hosted config.json URLs.
_a : List[str] = {
    """facebook/convnextv2-tiny-1k-224""": """https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json""",
}
class _UpperCAmelCase(_snake_case, _snake_case):
    """Configuration for ConvNeXt V2 backbones.

    NOTE(review): the scrambled original repeated ``snake_case_`` for every
    parameter (a SyntaxError) and never set the instance attributes that
    ``self.depths``/``self.stage_names`` below rely on; the conventional
    parameter names and ``self`` assignments are restored.
    """

    # NOTE(review): presumably the ``model_type`` identifier — attribute name
    # was scrambled, so it is kept as-is.
    __lowercase : List[Any] = """convnextv2"""

    def __init__(
        self,
        num_channels=3,
        patch_size=4,
        num_stages=4,
        hidden_sizes=None,
        depths=None,
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        drop_path_rate=0.0,
        image_size=224,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_stages = num_stages
        # ConvNeXt V2 "tiny" defaults when no explicit architecture is given.
        self.hidden_sizes = [96, 192, 384, 768] if hidden_sizes is None else hidden_sizes
        self.depths = [3, 3, 9, 3] if depths is None else depths
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.drop_path_rate = drop_path_rate
        self.image_size = image_size
        # One named stage per depth entry, plus the stem.
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
| 87 | 1 |
"""simple docstring"""
import os
def solution(a: str = "matrix.txt") -> int:
    """Return the minimal path sum through the grid in file *a* (Project
    Euler 81), moving only right and down from the top-left to the
    bottom-right cell.

    NOTE(review): the scrambled original was named ``a__`` while the
    ``__main__`` guard below calls ``solution()``; it also iterated
    ``range(a)`` over the filename string and parsed cells with ``int(a)`` —
    both NameError/TypeError bugs fixed here.
    """
    with open(os.path.join(os.path.dirname(a), a)) as in_file:
        data = in_file.read()
    grid = [[int(cell) for cell in row.split(",")] for row in data.strip().splitlines()]
    rows = len(grid)
    cols = len(grid[0])
    # dp[i][j] = minimal path sum reaching cell (i, j).
    dp = [[0] * cols for _ in range(rows)]
    dp[0][0] = grid[0][0]
    for j in range(1, cols):
        dp[0][j] = grid[0][j] + dp[0][j - 1]
    for i in range(1, rows):
        dp[i][0] = grid[i][0] + dp[i - 1][0]
    for i in range(1, rows):
        for j in range(1, cols):
            dp[i][j] = grid[i][j] + min(dp[i - 1][j], dp[i][j - 1])
    return dp[-1][-1]
if __name__ == "__main__":
print(f'{solution() = }')
| 87 |
"""simple docstring"""
from argparse import ArgumentParser, Namespace
from ..utils import logging
from . import BaseTransformersCLICommand
def a__ ( a : Namespace ):
    """Factory for ``argparse`` ``set_defaults(func=...)``: build the convert
    command from parsed CLI arguments.

    NOTE(review): as written this raises NameError twice — ``ConvertCommand``
    is not defined in this file (the command class below was scrambled to
    ``_UpperCAmelCase``), and the body reads ``args`` while the parameter is
    named ``a``. Confirm the intended names.
    """
    return ConvertCommand(
        args.model_type , args.tf_checkpoint , args.pytorch_dump_output , args.config , args.finetuning_task_name )
# Error message raised when a conversion needs TensorFlow but it is missing.
_a : int = """
transformers can only be used from the commandline to convert TensorFlow models in PyTorch, In that case, it requires
TensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.
"""
class _UpperCAmelCase(_snake_case):
    """CLI command that converts an original (mostly TensorFlow) checkpoint
    into a Transformers PyTorch checkpoint.

    NOTE(review): both methods below were scrambled to the same name
    ``lowerCamelCase__`` (the run-style method shadows the registration
    staticmethod); the original names could not be recovered, so the
    scrambled names are kept. The duplicated ``snake_case_`` parameters of
    ``__init__`` (a SyntaxError) and the discarded local bindings are fixed.
    """

    @staticmethod
    def lowerCamelCase__(parser):
        # Register the `convert` subcommand and its CLI arguments on *parser*.
        train_parser = parser.add_parser(
            "convert",
            help="CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints.",
        )
        train_parser.add_argument("--model_type", type=str, required=True, help="Model's type.")
        train_parser.add_argument(
            "--tf_checkpoint", type=str, required=True, help="TensorFlow checkpoint path or folder."
        )
        train_parser.add_argument(
            "--pytorch_dump_output", type=str, required=True, help="Path to the PyTorch saved model output."
        )
        train_parser.add_argument("--config", type=str, default="", help="Configuration file path or folder.")
        train_parser.add_argument(
            "--finetuning_task_name",
            type=str,
            default=None,
            help="Optional fine-tuning task name if the TF model was a finetuned model.",
        )
        # NOTE(review): presumably the module-level factory ``a__`` defined
        # above — names in this file were scrambled; confirm.
        train_parser.set_defaults(func=a__)

    def __init__(self, model_type, tf_checkpoint, pytorch_dump_output, config, finetuning_task_name, *args):
        self._logger = logging.get_logger("transformers-cli/converting")
        self._logger.info(f"Loading model {model_type}")
        self._model_type = model_type
        self._tf_checkpoint = tf_checkpoint
        self._pytorch_dump_output = pytorch_dump_output
        self._config = config
        self._finetuning_task_name = finetuning_task_name

    def lowerCamelCase__(self):
        """Dispatch to the model-type-specific conversion script and run it."""
        # NOTE(review): ``_a`` is the module-level TensorFlow import-error
        # message defined above the class — confirm against later shadowing.
        if self._model_type == "albert":
            try:
                from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import (
                    convert_tf_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(_a)
            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "bert":
            try:
                from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import (
                    convert_tf_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(_a)
            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "funnel":
            try:
                from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import (
                    convert_tf_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(_a)
            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "t5":
            try:
                from ..models.ta.convert_ta_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
            except ImportError:
                raise ImportError(_a)
            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "gpt":
            from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import (
                convert_openai_checkpoint_to_pytorch,
            )
            convert_openai_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "transfo_xl":
            try:
                from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import (
                    convert_transfo_xl_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(_a)
            if "ckpt" in self._tf_checkpoint.lower():
                # A raw TF checkpoint was passed; there is no dataset file.
                tf_checkpoint = self._tf_checkpoint
                tf_dataset_file = ""
            else:
                # Otherwise treat the given path as the dataset file.
                tf_dataset_file = self._tf_checkpoint
                tf_checkpoint = ""
            convert_transfo_xl_checkpoint_to_pytorch(
                tf_checkpoint, self._config, self._pytorch_dump_output, tf_dataset_file
            )
        elif self._model_type == "gpt2":
            try:
                from ..models.gpta.convert_gpta_original_tf_checkpoint_to_pytorch import (
                    convert_gpta_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(_a)
            convert_gpta_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "xlnet":
            try:
                from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import (
                    convert_xlnet_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(_a)
            convert_xlnet_checkpoint_to_pytorch(
                self._tf_checkpoint, self._config, self._pytorch_dump_output, self._finetuning_task_name
            )
        elif self._model_type == "xlm":
            from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import (
                convert_xlm_checkpoint_to_pytorch,
            )
            convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint, self._pytorch_dump_output)
        elif self._model_type == "lxmert":
            from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import (
                convert_lxmert_checkpoint_to_pytorch,
            )
            convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint, self._pytorch_dump_output)
        elif self._model_type == "rembert":
            from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import (
                convert_rembert_tf_checkpoint_to_pytorch,
            )
            convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        else:
            raise ValueError(
                "--model_type should be selected in the list [bert, gpt, gpt2, t5, transfo_xl, xlnet, xlm, lxmert]"
            )
| 87 | 1 |
"""simple docstring"""
import colorsys
from PIL import Image # type: ignore
def get_distance(x: float, y: float, max_step: int) -> float:
    """Return the normalized escape step of c = x + y*i under z -> z**2 + c.

    1.0 means the point never escaped within *max_step* iterations (treated
    as inside the Mandelbrot set); values near 0 escape immediately.

    NOTE(review): the scrambled original duplicated the parameter name ``a``
    (a SyntaxError) and was renamed to ``a__`` while the renderer below calls
    ``get_distance`` — restored here.
    """
    a = x
    b = y
    for step in range(max_step):  # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new
        # divergence happens for all complex numbers with an absolute
        # value greater than 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)
def get_black_and_white_rgb(distance: float) -> tuple:
    """Black for points inside the set (distance == 1), white otherwise.

    NOTE(review): renamed from the scrambled ``a__`` to the name the renderer
    below actually calls.
    """
    if distance == 1:
        return (0, 0, 0)
    else:
        return (255, 255, 255)
def get_color_coded_rgb(distance: float) -> tuple:
    """Black for points inside the set; otherwise an HSV rainbow color whose
    hue encodes the escape distance.

    NOTE(review): renamed from the scrambled ``a__`` to the name the renderer
    below actually calls.
    """
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 255) for i in colorsys.hsv_to_rgb(distance, 1, 1))
def get_image(
    image_width: int = 800,
    image_height: int = 600,
    figure_center_x: float = -0.6,
    figure_center_y: float = 0,
    figure_width: float = 3.2,
    max_step: int = 50,
    use_distance_color_coding: bool = True,
):
    """Render the Mandelbrot set into a PIL RGB image.

    NOTE(review): the scrambled original named every parameter ``a`` (a
    SyntaxError) and discarded the per-pixel color instead of writing it into
    the image; parameter names are restored from the body's references and
    the pixel store is reinstated.
    """
    img = Image.new("RGB", (image_width, image_height))
    pixels = img.load()
    # Keep the aspect ratio: figure height follows from width. Hoisted out of
    # the pixel loops because it is loop-invariant.
    figure_height = figure_width / image_width * image_height
    # loop through the image-coordinates
    for image_x in range(image_width):
        for image_y in range(image_height):
            # determine the figure-coordinates based on the image-coordinates
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height
            distance = get_distance(figure_x, figure_y, max_step)
            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance)
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance)
    return img
if __name__ == "__main__":
import doctest
doctest.testmod()
# colored version, full figure
_a : Optional[Any] = get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
img.show()
| 87 |
"""simple docstring"""
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def rename_base_flax_keys(flax_key_tuple, flax_tensor):
    """Rename a Flax parameter key tuple and reshape its tensor to the
    PyTorch convention (expert/linear ``kernel`` -> transposed ``weight``;
    ``scale``/``embedding`` -> ``weight``).

    NOTE(review): the scrambled original duplicated the parameter name ``a``
    (a SyntaxError) and bound results to a throwaway local while returning
    ``flax_key_tuple``/``flax_tensor``; renamed to the name the sharding
    routine below actually calls.
    """
    if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
        # expert layer: (experts, in, out) -> (experts, out, in)
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = torch.permute(flax_tensor, (0, 2, 1))
    elif flax_key_tuple[-1] == "kernel" and ".".join(flax_key_tuple):
        # linear layer
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = flax_tensor.T
    elif flax_key_tuple[-1] in ["scale", "embedding"]:
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
    return flax_key_tuple, flax_tensor
def get_key_and_tensorstore_dict(layer, checkpoint_info, switch_checkpoint_path):
    """Split a flattened checkpoint key into its real layer name, the
    tensorstore sub-key path, and the resolved content for that entry.

    NOTE(review): the scrambled original duplicated the parameter name ``a``
    (a SyntaxError) and discarded every local binding; names restored from
    the body's own references and the call site below.
    """
    if "metadata" in layer:
        split_layer = layer.split("metadata")
        curr_real_layer_name = "".join(split_layer[0])[:-1]
        split_layer = [tuple(("metadata" + split_layer[1]).split("/"))]
    elif "kvstore" in layer:
        split_layer = layer.split("kvstore")
        curr_real_layer_name = "".join(split_layer[0])[:-1]
        split_layer = [tuple(("kvstore" + split_layer[1]).split("/"))]
    else:
        split_layer = layer.split("/")
        curr_real_layer_name = "/".join(split_layer[:-1])
        split_layer = (split_layer[-1],)

    if "kvstore/path" in layer:
        # Rebase the stored relative path onto the checkpoint directory.
        content = f"{switch_checkpoint_path}/{checkpoint_info[layer]}"
    elif "kvstore/driver" in layer:
        content = "file"
    else:
        content = checkpoint_info[layer]
    return curr_real_layer_name, split_layer, content
def rename_and_save_block(current_block, save_path):
    """Rename the keys of a shard's state dict and ``torch.save`` it.

    NOTE(review): the scrambled original duplicated the parameter name ``a``
    (a SyntaxError) and lost the key transform inside the loop; upstream
    rewrites ``/``-separated keys to ``.``-separated ones — confirm.
    """
    current_block = rename_keys(current_block)
    new_current_block = {}
    for k, v in current_block.items():
        new_current_block[k.replace("/", ".")] = v
    current_block = new_current_block
    torch.save(current_block, save_path)
def shard_on_the_fly(switch_checkpoint_path, dump_path, max_shard_size, dtype, weights_name: str = WEIGHTS_NAME):
    """Stream a T5X/Flax Switch-Transformers checkpoint from tensorstore and
    write it out as size-limited PyTorch ``.bin`` shards plus a weight index.

    Returns ``(metadata, index)``; ``index`` is ``None`` when everything fit
    into a single shard.

    NOTE(review): the scrambled original duplicated every parameter name
    (a SyntaxError) and discarded most local bindings; names restored from
    the body's own references, the helper call sites, and the ``__main__``
    invocation below.
    """
    max_shard_size = convert_file_size_to_int(max_shard_size)
    sharded_state_dicts = []
    current_block = {}
    current_block_size = 0
    total_size = 0
    os.makedirs(dump_path, exist_ok=True)
    with gfile.GFile(switch_checkpoint_path + "/checkpoint", "rb") as fp:
        checkpoint_info = serialization.msgpack_restore(fp.read())["optimizer"]["target"]
        checkpoint_info = flatten_dict(checkpoint_info, sep="/")
    # Group the tensorstore spec fragments back under their real layer names.
    all_layers = {}
    for layer in checkpoint_info.keys():
        curr_real_layer_name, split_layer, content = get_key_and_tensorstore_dict(
            layer, checkpoint_info, switch_checkpoint_path
        )
        if curr_real_layer_name in all_layers:
            all_layers[curr_real_layer_name][split_layer[-1]] = content
        else:
            all_layers[curr_real_layer_name] = {split_layer[-1]: content}
    for key in all_layers.keys():
        # open tensorstore file
        raw_weights = ts.open(unflatten_dict(all_layers[key])).result().read().result()
        raw_weights = torch.tensor(raw_weights)
        weight_size = raw_weights.numel() * dtype_byte_size(raw_weights.dtype)
        # use the renaming pattern from the small conversion scripts
        key_tuple, raw_weights = rename_base_flax_keys(tuple(key.split("/")), raw_weights)
        key = "/".join(key_tuple)
        # If this weight is going to tip up over the maximal size, we split.
        if current_block_size + weight_size > max_shard_size:
            save_path = os.path.join(
                dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin")
            )
            rename_and_save_block(current_block, save_path)
            sharded_state_dicts.append(current_block.keys())
            del current_block
            current_block = {}
            current_block_size = 0
        current_block[key] = raw_weights.to(getattr(torch, dtype))
        current_block_size += weight_size
        total_size += weight_size
    # Add the last block
    save_path = os.path.join(dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin"))
    rename_and_save_block(current_block, save_path)
    sharded_state_dicts.append(current_block.keys())
    # If we only have one shard, we return it
    if len(sharded_state_dicts) == 1:
        return {weights_name: sharded_state_dicts[0]}, None
    # Otherwise, let's build the index
    weight_map = {}
    shards = {}
    for idx, shard in enumerate(sharded_state_dicts):
        shard_file = weights_name.replace(".bin", f"-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.bin")
        temp_filename = os.path.join(dump_path, weights_name.replace(".bin", f"-{idx+1:05d}-of-???.bin"))
        # Replace the provisional "???" shard count with the final one.
        os.rename(temp_filename, os.path.join(dump_path, shard_file))
        shards[shard_file] = shard
        for key in shard:
            weight_map[key] = shard_file
    # Add the metadata
    metadata = {"total_size": total_size}
    index = {"metadata": metadata, "weight_map": weight_map}
    with open(os.path.join(dump_path, WEIGHTS_INDEX_NAME), "w", encoding="utf-8") as f:
        content = json.dumps(index, indent=2, sort_keys=True) + "\n"
        f.write(content)
    return metadata, index
if __name__ == "__main__":
_a : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--switch_t5x_checkpoint_path""",
default="""/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600""",
type=str,
required=False,
help="""Path to a directory containing a folder per layer. Follows the original Google format.""",
)
parser.add_argument("""--max_shard_size""", default="""10GB""", required=False, help="""Max shard size""")
parser.add_argument("""--dtype""", default="""bfloat16""", type=str, required=False, help="""dtype of the saved model""")
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted""",
type=str,
required=False,
help="""Path to the output pytorch model.""",
)
_a : Optional[int] = parser.parse_args()
shard_on_the_fly(
args.switch_tax_checkpoint_path,
args.pytorch_dump_folder_path,
args.max_shard_size,
args.dtype,
)
def a__():
    """Smoke-test the converted Switch checkpoint: load config, model and
    tokenizer, then generate for a sentinel-filled prompt and print it.

    NOTE(review): the scrambled original bound every object to a throwaway
    local and then read undefined names (``tokenizer``, ``model``, ``a``);
    the locals are restored from the body's own references.
    """
    from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, TaTokenizer

    config = SwitchTransformersConfig.from_pretrained("google/switch-base-8")
    config.save_pretrained("/home/arthur_huggingface_co/transformers/switch_converted")
    model = SwitchTransformersForConditionalGeneration.from_pretrained(
        "/home/arthur_huggingface_co/transformers/switch_converted", device_map="auto"
    )
    tokenizer = TaTokenizer.from_pretrained("t5-small")
    text = "A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>."
    input_ids = tokenizer(text, return_tensors="pt").input_ids
    out = model.generate(input_ids, decoder_start_token_id=0)
    print(tokenizer.decode(out[0]))
| 87 | 1 |
"""simple docstring"""
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class _UpperCAmelCase(_snake_case):
    """Unit tests for ``DDPMScheduler``.

    NOTE(review): every method in the scrambled original shared the name
    ``lowerCamelCase__`` (so only the last one survived) and locals were
    bound to a throwaway name while later lines read the real ones; method
    and local names are restored from internal references and the upstream
    diffusers test suite — confirm the ``test_*`` names.
    """

    # Read by the common-test base via ``self.scheduler_classes``.
    scheduler_classes = (DDPMScheduler,)

    def get_scheduler_config(self, **kwargs):
        """Return a default DDPM config dict, overridable via **kwargs."""
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "variance_type": "fixed_small",
            "clip_sample": True,
        }
        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_variance_type(self):
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [0, 500, 999]:
            self.check_over_forward(time_step=t)

    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1e-5

    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        num_trained_timesteps = len(scheduler)
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)
        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            # sample = pred_prev_sample + variance
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 258.9606) < 1e-2
        assert abs(result_mean.item() - 0.3372) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)
        num_trained_timesteps = len(scheduler)
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)
        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 202.0296) < 1e-2
        assert abs(result_mean.item() - 0.2631) < 1e-3

    def test_custom_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [100, 87, 50, 1, 0]
        scheduler.set_timesteps(timesteps=timesteps)
        timesteps = scheduler.timesteps
        for i, timestep in enumerate(timesteps):
            if i == len(timesteps) - 1:
                expected_prev_t = -1
            else:
                expected_prev_t = timesteps[i + 1]
            prev_t = scheduler.previous_timestep(timestep)
            prev_t = prev_t.item()
            self.assertEqual(prev_t, expected_prev_t)

    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [100, 87, 50, 51, 0]
        with self.assertRaises(ValueError, msg="`custom_timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)

    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [100, 87, 50, 1, 0]
        num_inference_steps = len(timesteps)
        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `custom_timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)

    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [scheduler.config.num_train_timesteps]
        with self.assertRaises(
            ValueError,
            msg="`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}",
        ):
            scheduler.set_timesteps(timesteps=timesteps)
| 87 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import MobileBertConfig, is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_MODEL_FOR_PRETRAINING_MAPPING,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertModel,
)
@require_tf
class _UpperCAmelCase(_snake_case, _snake_case, unittest.TestCase):
    """Model + pipeline test wiring for TF MobileBERT.

    NOTE(review): the two boolean flags below were both scrambled to the same
    attribute name (the second shadows the first) and their original names
    could not be recovered; the class-attribute names are kept as-is since
    the test-mixin bases read them by name.
    """

    __lowercase : Dict = (
        (
            TFMobileBertModel,
            TFMobileBertForMaskedLM,
            TFMobileBertForNextSentencePrediction,
            TFMobileBertForPreTraining,
            TFMobileBertForQuestionAnswering,
            TFMobileBertForSequenceClassification,
            TFMobileBertForTokenClassification,
            TFMobileBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    __lowercase : Optional[Any] = (
        {
            "feature-extraction": TFMobileBertModel,
            "fill-mask": TFMobileBertForMaskedLM,
            "question-answering": TFMobileBertForQuestionAnswering,
            "text-classification": TFMobileBertForSequenceClassification,
            "token-classification": TFMobileBertForTokenClassification,
            "zero-shot": TFMobileBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    __lowercase : Union[str, Any] = False
    __lowercase : Optional[int] = False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        """Augment *inputs_dict* with dummy labels for pretraining classes.

        NOTE(review): the scrambled original repeated the parameter name
        ``snake_case_`` (a SyntaxError), passed an undefined name to
        ``get_values`` and discarded the label tensor; restored so this
        actually overrides the base ``_prepare_for_class`` it delegates to.
        """
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class in get_values(TF_MODEL_FOR_PRETRAINING_MAPPING):
                # The pretraining head expects one next-sentence label per example.
                inputs_dict["next_sentence_label"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
        return inputs_dict
class _UpperCAmelCase ( _snake_case):
def __init__( self , snake_case_ , snake_case_=13 , snake_case_=7 , snake_case_=True , snake_case_=True , snake_case_=True , snake_case_=True , snake_case_=99 , snake_case_=32 , snake_case_=32 , snake_case_=2 , snake_case_=4 , snake_case_=37 , snake_case_="gelu" , snake_case_=0.1 , snake_case_=0.1 , snake_case_=5_12 , snake_case_=16 , snake_case_=2 , snake_case_=0.02 , snake_case_=3 , snake_case_=4 , snake_case_=None , ):
_snake_case : Optional[Any] = parent
_snake_case : List[Any] = batch_size
_snake_case : Optional[int] = seq_length
_snake_case : Dict = is_training
_snake_case : Union[str, Any] = use_input_mask
_snake_case : List[Any] = use_token_type_ids
_snake_case : int = use_labels
_snake_case : Dict = vocab_size
_snake_case : Tuple = hidden_size
_snake_case : Optional[int] = num_hidden_layers
_snake_case : List[str] = num_attention_heads
_snake_case : Optional[Any] = intermediate_size
_snake_case : Dict = hidden_act
_snake_case : Tuple = hidden_dropout_prob
_snake_case : Union[str, Any] = attention_probs_dropout_prob
_snake_case : str = max_position_embeddings
_snake_case : str = type_vocab_size
_snake_case : Any = type_sequence_label_size
_snake_case : Optional[int] = initializer_range
_snake_case : List[Any] = num_labels
_snake_case : Optional[int] = num_choices
_snake_case : Optional[int] = scope
_snake_case : Any = embedding_size
def lowerCamelCase__ ( self ):
_snake_case : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_snake_case : Optional[Any] = None
if self.use_input_mask:
_snake_case : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] )
_snake_case : List[str] = None
if self.use_token_type_ids:
_snake_case : Any = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_snake_case : Dict = None
_snake_case : Tuple = None
_snake_case : str = None
if self.use_labels:
_snake_case : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_snake_case : Any = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_snake_case : Optional[int] = ids_tensor([self.batch_size] , self.num_choices )
_snake_case : Tuple = MobileBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , embedding_size=self.embedding_size , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCamelCase__(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
    """Check TFMobileBertModel output shapes for dict, list and tensor inputs.

    Fix: all seven parameters were named `snake_case_` (duplicate argument
    names are a SyntaxError) and results were bound to `_snake_case` while
    the assertions read `result`.
    """
    model = TFMobileBertModel(config=config)
    inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
    result = model(inputs)

    inputs = [input_ids, input_mask]
    result = model(inputs)

    result = model(input_ids)

    self.parent.assertEqual(
        result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
def lowerCamelCase__(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
    """Check the masked-LM head's logits shape.

    Fix: duplicate `snake_case_` parameter names (SyntaxError) and unbound
    `model`/`result` locals.
    """
    model = TFMobileBertForMaskedLM(config=config)
    inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
    result = model(inputs)
    self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
def lowerCamelCase__(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
    """Check the next-sentence-prediction head's (batch, 2) logits shape.

    Fix: duplicate `snake_case_` parameter names (SyntaxError) and unbound
    `model`/`result` locals.
    """
    model = TFMobileBertForNextSentencePrediction(config=config)
    inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
    result = model(inputs)
    self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))
def lowerCamelCase__(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
    """Check both pretraining heads (MLM + NSP) output shapes.

    Fix: duplicate `snake_case_` parameter names (SyntaxError) and unbound
    `model`/`result` locals.
    """
    model = TFMobileBertForPreTraining(config=config)
    inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
    result = model(inputs)
    self.parent.assertEqual(
        result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))
def lowerCamelCase__(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
    """Check the sequence-classification head's (batch, num_labels) logits.

    Fix: duplicate `snake_case_` parameter names (SyntaxError); the first
    statement assigned `self.num_labels` to a throwaway name — per the
    standard tester pattern it configures the label count (verify upstream).
    """
    config.num_labels = self.num_labels
    model = TFMobileBertForSequenceClassification(config=config)
    inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
    result = model(inputs)
    self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
def lowerCamelCase__(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
    """Check the multiple-choice head's (batch, num_choices) logits.

    Fix: duplicate `snake_case_` parameter names (SyntaxError) and unbound
    `multiple_choice_*` locals that the inputs dict read.
    """
    config.num_choices = self.num_choices
    model = TFMobileBertForMultipleChoice(config=config)
    # Expand (batch, seq) inputs to (batch, num_choices, seq) by tiling.
    multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
    multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
    multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
    inputs = {
        "input_ids": multiple_choice_inputs_ids,
        "attention_mask": multiple_choice_input_mask,
        "token_type_ids": multiple_choice_token_type_ids,
    }
    result = model(inputs)
    self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
def lowerCamelCase__(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
    """Check the token-classification head's per-token logits shape.

    Fix: duplicate `snake_case_` parameter names (SyntaxError) and unbound
    `model`/`result` locals; restored the `config.num_labels` setup.
    """
    config.num_labels = self.num_labels
    model = TFMobileBertForTokenClassification(config=config)
    inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
    result = model(inputs)
    self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
def lowerCamelCase__(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
    """Check the question-answering head's start/end logits shapes.

    Fix: duplicate `snake_case_` parameter names (SyntaxError) and unbound
    `model`/`result` locals.
    """
    model = TFMobileBertForQuestionAnswering(config=config)
    inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
    result = model(inputs)
    self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
    self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
def lowerCamelCase__(self):
    """Repackage prepare_config_and_inputs() output as (config, inputs_dict).

    Fix: the seven-way unpack bound every element to the throwaway name
    `_snake_case` while the dict below read `input_ids`, `token_type_ids`
    and `input_mask` — none of which were defined.
    """
    config_and_inputs = self.prepare_config_and_inputs()
    (
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
    ) = config_and_inputs
    inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
    return config, inputs_dict
def lowerCamelCase__(self):
    """setUp: build the shared model tester and config tester.

    Fix: both helpers were bound to `_snake_case` instead of the attributes
    (`self.model_tester`, `self.config_tester`) that every test reads, and
    `config_class` was given the undefined name `snake_case_` — restored to
    MobileBertConfig per the standard pattern (verify against upstream).
    """
    self.model_tester = TFMobileBertModelTest.TFMobileBertModelTester(self)
    self.config_tester = ConfigTester(self, config_class=MobileBertConfig, hidden_size=37)
def lowerCamelCase__ ( self ):
    # Delegate to ConfigTester's standard config checks (dict/json round-trips,
    # keyword handling, etc.); relies on self.config_tester created in setUp.
    self.config_tester.run_common_tests()
def lowerCamelCase__(self):
    """Exercise the base-model shape checks via the shared tester.

    Fix: the inputs were bound to `_snake_case` while the call expanded the
    undefined name `snake_case_`.
    """
    config_and_inputs = self.model_tester.prepare_config_and_inputs()
    self.model_tester.create_and_check_mobilebert_model(*config_and_inputs)
def lowerCamelCase__(self):
    """Exercise the masked-LM head checks via the shared tester.

    Fix: the inputs were bound to `_snake_case` while the call expanded the
    undefined name `snake_case_`.
    """
    config_and_inputs = self.model_tester.prepare_config_and_inputs()
    self.model_tester.create_and_check_mobilebert_for_masked_lm(*config_and_inputs)
def lowerCamelCase__(self):
    """Exercise the multiple-choice head checks via the shared tester.

    Fix: the inputs were bound to `_snake_case` while the call expanded the
    undefined name `snake_case_`.
    """
    config_and_inputs = self.model_tester.prepare_config_and_inputs()
    self.model_tester.create_and_check_mobilebert_for_multiple_choice(*config_and_inputs)
def lowerCamelCase__(self):
    """Exercise the next-sentence-prediction checks via the shared tester.

    Fix: the inputs were bound to `_snake_case` while the call expanded the
    undefined name `snake_case_`.
    """
    config_and_inputs = self.model_tester.prepare_config_and_inputs()
    self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*config_and_inputs)
def lowerCamelCase__(self):
    """Exercise the pretraining-heads checks via the shared tester.

    Fix: the inputs were bound to `_snake_case` while the call expanded the
    undefined name `snake_case_`.
    """
    config_and_inputs = self.model_tester.prepare_config_and_inputs()
    self.model_tester.create_and_check_mobilebert_for_pretraining(*config_and_inputs)
def lowerCamelCase__(self):
    """Exercise the question-answering head checks via the shared tester.

    Fix: the inputs were bound to `_snake_case` while the call expanded the
    undefined name `snake_case_`.
    """
    config_and_inputs = self.model_tester.prepare_config_and_inputs()
    self.model_tester.create_and_check_mobilebert_for_question_answering(*config_and_inputs)
def lowerCamelCase__(self):
    """Exercise the sequence-classification head checks via the shared tester.

    Fix: the inputs were bound to `_snake_case` while the call expanded the
    undefined name `snake_case_`.
    """
    config_and_inputs = self.model_tester.prepare_config_and_inputs()
    self.model_tester.create_and_check_mobilebert_for_sequence_classification(*config_and_inputs)
def lowerCamelCase__(self):
    """Exercise the token-classification head checks via the shared tester.

    Fix: the inputs were bound to `_snake_case` while the call expanded the
    undefined name `snake_case_`.
    """
    config_and_inputs = self.model_tester.prepare_config_and_inputs()
    self.model_tester.create_and_check_mobilebert_for_token_classification(*config_and_inputs)
@slow
def lowerCamelCase__(self):
    """Smoke-test that the reference checkpoint loads from the Hub.

    Fix: the loaded model was bound to `_snake_case` while `from_pretrained`
    received and `assertIsNotNone` checked the undefined name `snake_case_`.
    """
    # for model_name in TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
    for model_name in ["google/mobilebert-uncased"]:
        model = TFMobileBertModel.from_pretrained(model_name)
        self.assertIsNotNone(model)
@require_tf
class _UpperCAmelCase(unittest.TestCase):
    """Integration test: MobileBert pretraining-head logits on a fixed input.

    Fix: every intermediate was bound to `_snake_case` while the assertions
    read undefined names (`model`, `output`, `expected_shape`,
    `expected_slice`). All runtime values preserved byte-for-byte.
    """

    @slow
    def lowerCamelCase__(self):
        model = TFMobileBertForPreTraining.from_pretrained("google/mobilebert-uncased")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        expected_shape = [1, 6, 3_05_22]
        self.assertEqual(output.shape, expected_shape)

        expected_slice = tf.constant(
            [
                [
                    [-4.5919547, -9.248295, -9.645256],
                    [-6.7306175, -6.440284, -6.6052837],
                    [-7.2743506, -6.7847915, -6.024673],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1E-4)
| 87 | 1 |
"""simple docstring"""
from itertools import count
def a__(a: int = 50) -> int:
    """Project Euler 115: smallest row length n for which the fill-count
    function F(a, n) first exceeds one million (a = minimum block length).

    Fixes: the tally list was bound to `_snake_case` while the loop read the
    undefined names `fill_count_functions` / `min_block_length`, and the
    main guard called an undefined `solution()`.
    """
    # F(a, n) = 1 for every n < a: only the empty (all-black) row fits.
    fill_count_functions = [1] * a
    for n in count(a):
        fill_count_functions.append(1)  # the all-black row of length n
        for block_length in range(a, n + 1):
            # Blocks ending strictly before the row end need one separator cell.
            for block_start in range(n - block_length):
                fill_count_functions[n] += fill_count_functions[
                    n - block_start - block_length - 1
                ]
            fill_count_functions[n] += 1  # block of this length flush with the end
        if fill_count_functions[n] > 1_000_000:
            break
    return n


if __name__ == "__main__":
    print(f"{a__() = }")  # was `solution()`, which this module never defines
| 87 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_a : List[Any] = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : List[Any] = ["""BartphoTokenizer"""]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bartpho import BartphoTokenizer
else:
import sys
_a : Optional[int] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 87 | 1 |
"""simple docstring"""
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
_a : str = re.compile("""[^A-Za-z_0-9]""")
# parameters used in DuplicationIndex
_a : Any = 10
_a : str = 256
def a__ ( a : List[str] ):
    """Return a MinHash over the token list *a*, or None if it is shorter than
    MIN_NUM_TOKENS.

    Fixes: the hash object was bound to a throwaway name while the loop and
    return read `min_hash`, and `num_perm` was passed the token list itself
    rather than the permutation count.
    NOTE(review): MIN_NUM_TOKENS / NUM_PERM are kept as in the original, but
    the module constants above are bound to `_a` — confirm their names.
    """
    if len(a ) < MIN_NUM_TOKENS:
        return None
    min_hash = MinHash(num_perm=NUM_PERM)
    for token in set(a ):
        min_hash.update(token.encode() )
    return min_hash
def a__ ( a : str ):
    """Return the set of non-blank tokens obtained by splitting *a* on the
    module's non-alphanumeric pattern."""
    pieces = NON_ALPHA.split(a)
    return {piece for piece in pieces if len(piece.strip()) > 0}
class _UpperCAmelCase :
    """MinHash-LSH index that groups near-duplicate entries into clusters.

    NOTE(review): this block looks machine-garbled — the `_snake_case`
    bindings never define the attributes/locals read below (`self._index`,
    `close_duplicates`, `duplicate_clusters`, `cluster`), and all three
    methods share the name `lowerCamelCase__`, so only the last definition
    survives on the class. Compare against the upstream CodeParrot
    minhash-deduplication implementation before relying on it.
    """

    def __init__( self , *,
        snake_case_ = 0.85 , ):
        # snake_case_: intended Jaccard-similarity threshold (default 0.85).
        # NOTE(review): RHS names below (`duplication_jaccard_threshold`) are
        # undefined here, and the module constants are bound to `_a`, not NUM_PERM.
        _snake_case : Optional[Any] = duplication_jaccard_threshold
        _snake_case : List[Any] = NUM_PERM
        _snake_case : List[Any] = MinHashLSH(threshold=self._duplication_jaccard_threshold , num_perm=self._num_perm )
        _snake_case : str = defaultdict(snake_case_ )

    def lowerCamelCase__ ( self , snake_case_ , snake_case_ ):
        # add(code_key, min_hash): query near-duplicates, insert the new key,
        # then attach it to an existing cluster or seed a new one.
        _snake_case : Optional[int] = self._index.query(snake_case_ )
        if code_key in self._index.keys:
            print(F'Duplicate key {code_key}' )
            return
        self._index.insert(snake_case_ , snake_case_ )
        if len(snake_case_ ) > 0:
            for base_duplicate in close_duplicates:
                if base_duplicate in self._duplicate_clusters:
                    self._duplicate_clusters[base_duplicate].add(snake_case_ )
                    break
            else:
                self._duplicate_clusters[close_duplicates[0]].add(snake_case_ )

    def lowerCamelCase__ ( self ):
        # get_duplicate_clusters(): materialize each cluster as a list of dicts
        # with base_index / repo_name / path keys.
        _snake_case : Union[str, Any] = []
        for base, duplicates in self._duplicate_clusters.items():
            _snake_case : Tuple = [base] + list(snake_case_ )
            # reformat the cluster to be a list of dict
            _snake_case : Optional[int] = [{"base_index": el[0], "repo_name": el[1], "path": el[2]} for el in cluster]
            duplicate_clusters.append(snake_case_ )
        return duplicate_clusters

    def lowerCamelCase__ ( self , snake_case_ ):
        # save(filepath): dump the clusters as JSON.
        _snake_case : int = self.get_duplicate_clusters()
        with open(snake_case_ , "w" ) as f:
            json.dump(snake_case_ , snake_case_ )
def a__ ( a : str ):
    """Map a dataset element to ((index, repo_name, path), MinHash).

    NOTE(review): garbled — the tuple unpack binds `_snake_case` twice while
    the body reads `index`/`data`, `min_hash` is never bound, and
    `get_min_hash` is not defined under that name in this module.
    """
    _snake_case , _snake_case : Union[str, Any] = element
    _snake_case : Union[str, Any] = get_min_hash([t for t in NON_ALPHA.split(data["content"] ) if len(t.strip() ) > 0] )
    if min_hash is not None:
        return (index, data["repo_name"], data["path"]), min_hash
def a__ ( a : Type[Dataset] ):
    """Yield (key, MinHash) pairs for dataset rows using a process pool.

    NOTE(review): `_compute_min_hash` is not defined under that name in this
    module (the helper above is also called `a__`) — garbled cross-reference.
    """
    with mp.Pool() as pool:
        for data in pool.imap_unordered(
            _compute_min_hash , ThreadedIterator(a , max_queue_size=10_000 ) , chunksize=100 , ):
            if data is not None:
                yield data
def a__ ( a : Type[Dataset] , a : float ):
    """Build duplicate clusters by streaming MinHashes into the index.

    NOTE(review): garbled — the parameter list repeats `a` (duplicate
    argument names are a SyntaxError), `DuplicationIndex` is not defined
    under that name here (the class above is `_UpperCAmelCase`), and `di`
    is never bound before use.
    """
    _snake_case : Union[str, Any] = DuplicationIndex(duplication_jaccard_threshold=a )
    for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(a ) ) , max_queue_size=100 ) ):
        di.add(a , a )
    # Returns a List[Cluster] where Cluster is List[str] with the filenames.
    return di.get_duplicate_clusters()
def a__(str_a: str, str_b: str) -> float:
    """Jaccard similarity of the token sets of two strings.

    Fixes: both parameters were named `a` (duplicate argument names are a
    SyntaxError) and the token sets were bound to throwaway names while the
    return expression read `tokensa`. The `get_tokens` reference is kept as
    in the original — confirm the tokenizer helper is exported under that name.
    """
    tokens_a = get_tokens(str_a)
    tokens_b = get_tokens(str_b)
    return len(tokens_a & tokens_b) / len(tokens_a | tokens_b)
# Module-global dataset handle shared with worker processes; populated by the
# extremes-finding step below. NOTE(review): workers read `_shared_dataset`,
# which this binding (to `_a`) does not define.
_a : List[Any] = None
def a__ ( a : Optional[int] , a : Union[str, Any] ):
    """Pick per-cluster 'extreme' representatives and count their copies.

    NOTE(review): garbled — the parameter list repeats `a` (a SyntaxError),
    both loop variables are named `elementa` (the inner shadows the outer),
    and the `_snake_case` bindings never define the names read afterwards
    (`extremes`, the per-element content strings). Compare against the
    upstream `_find_cluster_extremes_shared`.
    """
    _snake_case : Optional[Any] = []
    for elementa in cluster:
        _snake_case : Any = _shared_dataset[elementa["base_index"]]["content"]
        for elementa in extremes:
            _snake_case : Dict = _shared_dataset[elementa["base_index"]]["content"]
            if jaccard_similarity(a , a ) >= jaccard_threshold:
                # close enough to an existing extreme: bump its copy counter
                elementa["copies"] += 1
                break
        else:
            # no close extreme found: seed a copy counter and keep this element
            _snake_case : str = 1
            extremes.append(a )
    return extremes
def a__ ( a : Union[str, Any] , a : Any , a : Optional[int] ):
    """Resolve extremes for every duplicate cluster using a process pool.

    NOTE(review): garbled — the parameter list repeats `a` (a SyntaxError),
    the global is read elsewhere as `_shared_dataset` while the module binds
    `_a`, and `dataset` / `extremes_list` are read but never defined here.
    """
    global _shared_dataset
    _snake_case : int = dataset
    _snake_case : Dict = []
    # partial() pins the jaccard threshold for the pool workers
    _snake_case : Union[str, Any] = partial(_find_cluster_extremes_shared , jaccard_threshold=a )
    with mp.Pool() as pool:
        for extremes in tqdm(
            pool.imap_unordered(
                a , a , ) , total=len(a ) , ):
            extremes_list.append(a )
    return extremes_list
def a__ ( a : Type[Dataset] , a : float = 0.85 ):
    """Deduplicate a dataset: cluster near-duplicates, keep one 'extreme' per
    cluster, filter the rest, and print a summary.

    NOTE(review): garbled — the parameter list and the filter lambda repeat
    the name `a` (SyntaxErrors), the `_snake_case` bindings never define
    `duplicate_clusters` / `duplicate_indices` / `extreme_dict` /
    `remove_indices` / `ds_filter`, and every summary print measures
    `len(a)` rather than the collection it names.
    """
    _snake_case : Tuple = make_duplicate_clusters(a , a )
    _snake_case : Tuple = {x["base_index"] for cluster in duplicate_clusters for x in cluster}
    _snake_case : Union[str, Any] = {}
    _snake_case : Dict = find_extremes(a , a , a )
    for extremes in extremes_clusters:
        for element in extremes:
            _snake_case : Optional[int] = element
    _snake_case : List[Any] = duplicate_indices - set(extreme_dict.keys() )
    _snake_case : Tuple = dataset.filter(lambda a , a : idx not in remove_indices , with_indices=a )
    # update duplicate_clusters
    for cluster in duplicate_clusters:
        for element in cluster:
            _snake_case : Tuple = element["base_index"] in extreme_dict
            if element["is_extreme"]:
                _snake_case : Any = extreme_dict[element["base_index"]]["copies"]
    print(f'Original dataset size: {len(a )}' )
    print(f'Number of duplicate clusters: {len(a )}' )
    print(f'Files in duplicate cluster: {len(a )}' )
    print(f'Unique files in duplicate cluster: {len(a )}' )
    print(f'Filtered dataset size: {len(a )}' )
    return ds_filter, duplicate_clusters
| 87 |
"""simple docstring"""
def a__(list_data: list, key: int, left: int = 0, right: int = 0) -> int:
    """Recursive two-ended linear search: return the index of `key` in
    `list_data`, or -1 if absent.

    Fixes: all four parameters were named `a` (duplicate argument names are a
    SyntaxError) and the recursive call went through an undefined name
    `search`. Note `right=0` is treated as "use the last index" (the
    `right or len-1` idiom), preserving the original design.
    """
    right = right or len(list_data) - 1
    if left > right:
        return -1
    elif list_data[left] == key:
        return left
    elif list_data[right] == key:
        return right
    else:
        # shrink the window from both ends
        return a__(list_data, key, left + 1, right - 1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 87 | 1 |
"""simple docstring"""
def a__(a: int = 100) -> int:
    """Project Euler 6: difference between the square of the sum and the sum
    of the squares of the first *a* natural numbers.

    Fixes: both accumulators were bound to the throwaway name `_snake_case`
    while the loop and return read `sum_of_squares` / `sum_of_ints`, the loop
    bound read an undefined `n`, and the main guard called an undefined
    `solution()`.
    """
    sum_of_squares = 0
    sum_of_ints = 0
    for i in range(1, a + 1):
        sum_of_squares += i**2
        sum_of_ints += i
    return sum_of_ints**2 - sum_of_squares


if __name__ == "__main__":
    print(f"{a__() = }")  # was `solution()`, which this module never defines
| 87 |
"""simple docstring"""
from __future__ import annotations
class _UpperCAmelCase :
def __init__( self , snake_case_ , snake_case_ ):
_snake_case , _snake_case : Dict = text, pattern
_snake_case , _snake_case : int = len(snake_case_ ), len(snake_case_ )
def lowerCamelCase__ ( self , snake_case_ ):
for i in range(self.patLen - 1 , -1 , -1 ):
if char == self.pattern[i]:
return i
return -1
def lowerCamelCase__ ( self , snake_case_ ):
for i in range(self.patLen - 1 , -1 , -1 ):
if self.pattern[i] != self.text[current_pos + i]:
return current_pos + i
return -1
def lowerCamelCase__ ( self ):
# searches pattern in text and returns index positions
_snake_case : List[str] = []
for i in range(self.textLen - self.patLen + 1 ):
_snake_case : Union[str, Any] = self.mismatch_in_text(snake_case_ )
if mismatch_index == -1:
positions.append(snake_case_ )
else:
_snake_case : Tuple = self.match_in_pattern(self.text[mismatch_index] )
_snake_case : Tuple = (
mismatch_index - match_index
) # shifting index lgtm [py/multiple-definition]
return positions
_a : List[Any] = """ABAABA"""
_a : str = """AB"""
_a : List[Any] = BoyerMooreSearch(text, pattern)
_a : Any = bms.bad_character_heuristic()
if len(positions) == 0:
print("""No match found""")
else:
print("""Pattern found in following positions: """)
print(positions)
| 87 | 1 |
"""simple docstring"""
import importlib.metadata
import warnings
from copy import deepcopy
from packaging import version
from ..utils import logging
from .import_utils import is_accelerate_available, is_bitsandbytes_available
if is_bitsandbytes_available():
import bitsandbytes as bnb
import torch
import torch.nn as nn
from ..pytorch_utils import ConvaD
if is_accelerate_available():
from accelerate import init_empty_weights
from accelerate.utils import find_tied_parameters
_a : Tuple = logging.get_logger(__name__)
def a__ ( a : Dict , a : str , a : Optional[int] , a : Optional[Any]=None , a : Optional[Any]=None ):
    """Assign a tensor (possibly as a bitsandbytes quantized parameter) onto a
    (sub)module attribute, moving it to the requested device.

    NOTE(review): machine-garbled — the parameter list repeats `a` (duplicate
    argument names are a SyntaxError) and the `_snake_case` bindings never
    define the names the body reads (`tensor_name`, `splits`, `old_value`,
    `new_value`, `is_abit`, ...). Indentation below is reconstructed to match
    the upstream `set_module_quantized_tensor_to_device`; confirm structure
    and the `bnb.nn.Paramsabit` / `bnb.nn.IntaParams` attribute spellings
    against upstream before relying on this.
    """
    # Walk dotted tensor names ("encoder.layer.0.weight") down to the owner.
    if "." in tensor_name:
        _snake_case : Any = tensor_name.split("." )
        for split in splits[:-1]:
            _snake_case : int = getattr(a , a )
            if new_module is None:
                raise ValueError(f'{module} has no attribute {split}.' )
            _snake_case : List[Any] = new_module
        _snake_case : Dict = splits[-1]

    if tensor_name not in module._parameters and tensor_name not in module._buffers:
        raise ValueError(f'{module} does not have a parameter or a buffer named {tensor_name}.' )
    _snake_case : str = tensor_name in module._buffers
    _snake_case : int = getattr(a , a )

    # Meta-device params need an explicit value to materialize on a real device.
    if old_value.device == torch.device("meta" ) and device not in ["meta", torch.device("meta" )] and value is None:
        raise ValueError(f'{tensor_name} is on the meta device, we need a `value` to put in on {device}.' )

    _snake_case : Union[str, Any] = False
    _snake_case : Any = False
    if is_buffer or not is_bitsandbytes_available():
        _snake_case : Union[str, Any] = False
        _snake_case : Optional[int] = False
    else:
        # Detect whether the existing parameter is already a bnb 4-bit / int8 param.
        _snake_case : List[Any] = hasattr(bnb.nn , "Params4bit" ) and isinstance(module._parameters[tensor_name] , bnb.nn.Paramsabit )
        _snake_case : Tuple = isinstance(module._parameters[tensor_name] , bnb.nn.IntaParams )

    if is_abit or is_abit:
        _snake_case : Optional[int] = module._parameters[tensor_name]
        if param.device.type != "cuda":
            if value is None:
                _snake_case : Any = old_value.to(a )
            elif isinstance(a , torch.Tensor ):
                _snake_case : Optional[int] = value.to("cpu" )
                if value.dtype == torch.inta:
                    # int8 checkpoints only serialize with bitsandbytes > 0.37.2
                    _snake_case : Tuple = version.parse(importlib.metadata.version("bitsandbytes" ) ) > version.parse(
                        "0.37.2" )
                    if not is_abit_serializable:
                        raise ValueError(
                            "Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. "
                            "Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`." )
            else:
                _snake_case : Optional[Any] = torch.tensor(a , device="cpu" )

            # Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization.
            # Since weights are saved in the correct "orientation", we skip transposing when loading.
            if issubclass(module.source_cls , a ) and fpaa_statistics is None:
                _snake_case : Tuple = new_value.T

            _snake_case : int = old_value.__dict__
            if is_abit:
                _snake_case : Dict = bnb.nn.IntaParams(a , requires_grad=a , **a ).to(a )
            elif is_abit:
                _snake_case : Optional[Any] = bnb.nn.Paramsabit(a , requires_grad=a , **a ).to(a )

            _snake_case : Optional[Any] = new_value
            if fpaa_statistics is not None:
                setattr(module.weight , "SCB" , fpaa_statistics.to(a ) )
    else:
        if value is None:
            _snake_case : Tuple = old_value.to(a )
        elif isinstance(a , torch.Tensor ):
            _snake_case : List[str] = value.to(a )
        else:
            _snake_case : List[Any] = torch.tensor(a , device=a )

        if is_buffer:
            _snake_case : Tuple = new_value
        else:
            _snake_case : Optional[int] = nn.Parameter(a , requires_grad=old_value.requires_grad )
            _snake_case : int = new_value
def a__ ( a : List[str] , a : List[str]=None , a : Tuple=None , a : Optional[int]=None , a : List[Any]=False ):
    """Recursively replace eligible Linear/Conv1D children with bitsandbytes
    quantized linear layers, returning (model, has_been_replaced).

    NOTE(review): machine-garbled — the parameter list repeats `a` (duplicate
    argument names are a SyntaxError) and `_snake_case` bindings never define
    the names read later (`current_key_name`, `in_features`, `has_been_replaced`,
    ...). Indentation reconstructed to match the upstream
    `_replace_with_bnb_linear`; confirm before relying on it.
    """
    for name, module in model.named_children():
        if current_key_name is None:
            _snake_case : Any = []
        current_key_name.append(a )
        if (isinstance(a , nn.Linear ) or isinstance(a , a )) and name not in modules_to_not_convert:
            # Check if the current key is not in the `modules_to_not_convert`
            if not any(key in ".".join(a ) for key in modules_to_not_convert ):
                with init_empty_weights():
                    if isinstance(a , a ):
                        # Conv1D stores (in, out) on the weight itself
                        _snake_case , _snake_case : List[str] = module.weight.shape
                    else:
                        _snake_case : List[str] = module.in_features
                        _snake_case : Tuple = module.out_features
                    if quantization_config.quantization_method() == "llm_int8":
                        _snake_case : Optional[int] = bnb.nn.LinearabitLt(
                            a , a , module.bias is not None , has_fpaa_weights=quantization_config.llm_inta_has_fpaa_weight , threshold=quantization_config.llm_inta_threshold , )
                        _snake_case : List[str] = True
                    else:
                        if (
                            quantization_config.llm_inta_skip_modules is not None
                            and name in quantization_config.llm_inta_skip_modules
                        ):
                            pass
                        else:
                            _snake_case : str = bnb.nn.Linearabit(
                                a , a , module.bias is not None , quantization_config.bnb_abit_compute_dtype , compress_statistics=quantization_config.bnb_abit_use_double_quant , quant_type=quantization_config.bnb_abit_quant_type , )
                            _snake_case : Dict = True
                    # Store the module class in case we need to transpose the weight later
                    _snake_case : Optional[int] = type(a )
                    # Force requires grad to False to avoid unexpected errors
                    model._modules[name].requires_grad_(a )
        if len(list(module.children() ) ) > 0:
            _snake_case , _snake_case : Dict = _replace_with_bnb_linear(
                a , a , a , a , has_been_replaced=a , )
        # Remove the last key for recursion
        current_key_name.pop(-1 )
    return model, has_been_replaced
def a__(model, modules_to_not_convert=None, current_key_name=None, quantization_config=None):
    """Public entry point: quantize a model's linear layers in place and warn
    if nothing was replaced.

    Fixes: all four parameters were named `a` (duplicate argument names are a
    SyntaxError) and the results were bound to throwaway names while the body
    read `modules_to_not_convert` / `model` / `has_been_replaced`.
    NOTE(review): `_replace_with_bnb_linear` and `logger` are referenced as in
    the original — confirm those module-level names exist (this file's
    obfuscation binds the logger to `_a`).
    """
    modules_to_not_convert = ["lm_head"] if modules_to_not_convert is None else modules_to_not_convert
    model, has_been_replaced = _replace_with_bnb_linear(
        model, modules_to_not_convert, current_key_name, quantization_config
    )
    if not has_been_replaced:
        logger.warning(
            "You are loading your model in 8bit or 4bit but no linear modules were found in your model."
            " Please double check your model architecture, or submit an issue on github if you think this is"
            " a bug." )
    return model
def a__(*args, **kwargs):
    """Deprecated shim for `replace_with_bnb_linear`.

    Fixes: `*a, **a` reused the same parameter name (a SyntaxError) and the
    warning's category argument was garbled — FutureWarning restored per the
    standard deprecation pattern (verify against upstream).
    """
    warnings.warn(
        "`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead" , FutureWarning, )
    return replace_with_bnb_linear(*args, **kwargs)
def a__(*args, **kwargs):
    """Deprecated shim for `set_module_quantized_tensor_to_device`.

    Fixes: `*a, **a` reused the same parameter name (a SyntaxError) and the
    warning's category argument was garbled — FutureWarning restored per the
    standard deprecation pattern (verify against upstream).
    """
    warnings.warn(
        "`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead" , FutureWarning, )
    return set_module_quantized_tensor_to_device(*args, **kwargs)
def a__(model):
    """Return module names (with .weight/.bias suffixes stripped) that should
    be kept in full precision when quantizing: tied weights plus the last
    (head) module.

    Fixes: every intermediate was bound to `_snake_case` while later lines
    read the real names (`tied_model`, `tied_keys`, `filtered_module_names`,
    ...). Reconstructed from the surrounding logic and comments — verify
    against upstream `get_keys_to_not_convert`.
    """
    # Work on a copy: tie_weights() mutates the model. This has ~0 cost when
    # done inside an `init_empty_weights` context manager.
    tied_model = deepcopy(model)
    tied_model.tie_weights()

    tied_params = find_tied_parameters(tied_model)
    # For compatibility with Accelerate < 0.18 (dict) vs newer (list of groups)
    if isinstance(tied_params, dict):
        tied_keys = sum(list(tied_params.values()), []) + list(tied_params.keys())
    else:
        tied_keys = sum(tied_params, [])
    has_tied_params = len(tied_keys) > 0

    # Check if it is a base model
    is_base_model = not hasattr(model, model.base_model_prefix)

    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []

    # otherwise they have an attached head
    list_modules = list(model.named_children())
    list_last_module = [list_modules[-1][0]]

    # add last module together with tied weights
    intersection = set(list_last_module) - set(tied_keys)
    list_untouched = list(set(tied_keys)) + list(intersection)

    # remove ".weight" / ".bias" from the keys
    names_to_remove = [".weight", ".bias"]
    filtered_module_names = []
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                name = name.replace(name_to_remove, "")
        filtered_module_names.append(name)

    return filtered_module_names
| 87 |
"""simple docstring"""
from datetime import datetime
import requests
from bsa import BeautifulSoup
if __name__ == "__main__":
_a : Dict = input("""Enter image url: """).strip()
print(f'Downloading image from {url} ...')
_a : str = BeautifulSoup(requests.get(url).content, """html.parser""")
# The image URL is in the content field of the first meta tag with property og:image
_a : str = soup.find("""meta""", {"""property""": """og:image"""})["""content"""]
_a : Dict = requests.get(image_url).content
_a : str = f'{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg'
with open(file_name, """wb""") as fp:
fp.write(image_data)
print(f'Done. Image saved to disk as {file_name}.')
| 87 | 1 |
"""simple docstring"""
import random
import unittest
import numpy as np
import torch
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionUpscalePipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class _UpperCAmelCase(OnnxPipelineTesterMixin, unittest.TestCase):
    """CPU smoke tests for the ONNX Stable Diffusion x4 upscale pipeline.

    Fixes: the checkpoint attribute was bound to a throwaway name while the
    tests read `self.hub_checkpoint`; every method shared one name (so all
    but the last were clobbered and unittest could not discover them); and
    intermediates were bound to `_snake_case` while the bodies read `pipe`,
    `inputs`, `image`, `image_slice`, `expected_slice`. Restored per the
    standard diffusers ONNX test pattern (`disable=None`,
    `skip_prk_steps=True`) — verify against upstream. All checkpoint ids and
    reference values preserved byte-for-byte.
    """

    # TODO: is there an appropriate internal test set?
    hub_checkpoint = "ssube/stable-diffusion-x4-upscaler-onnx"

    def get_dummy_inputs(self, seed=0):
        """Deterministic prompt/image/generator kwargs for a 3-step run."""
        image = floats_tensor((1, 3, 1_28, 1_28), rng=random.Random(seed))
        generator = torch.manual_seed(seed)
        return {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }

    def test_pipeline_default(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        # started as 128, should now be 512
        assert image.shape == (1, 5_12, 5_12, 3)
        expected_slice = np.array(
            [0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223])
        assert np.abs(image_slice - expected_slice).max() < 1E-1

    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 5_12, 5_12, 3)
        expected_slice = np.array(
            [0.6898892, 0.59240556, 0.52499527, 0.58866215, 0.52258235, 0.52572715, 0.62414473, 0.6174387, 0.6214964])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-1

    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 5_12, 5_12, 3)
        expected_slice = np.array(
            [0.7659278, 0.76437664, 0.75579107, 0.7691116, 0.77666986, 0.7727672, 0.7758664, 0.7812226, 0.76942515])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-1

    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 5_12, 5_12, 3)
        expected_slice = np.array(
            [0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-1

    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 5_12, 5_12, 3)
        expected_slice = np.array(
            [0.77424496, 0.773601, 0.7645288, 0.7769598, 0.7772739, 0.7738688, 0.78187233, 0.77879584, 0.767043])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-1
@nightly
@require_onnxruntime
@require_torch_gpu
class _UpperCAmelCase(unittest.TestCase):
    """Nightly GPU integration tests for the ONNX x4 upscale pipeline.

    Fixes: the two properties and both tests shared one method name (all but
    the last were clobbered), and intermediates were bound to `_snake_case`
    while the bodies read `pipe`, `output`, `images`, `image_slice`, ... —
    restored the property names (`gpu_provider`, `gpu_options`) that the
    tests reference. All URLs, checkpoint ids and reference values preserved
    byte-for-byte.
    """

    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        # NOTE(review): the original assigned a bare False to a throwaway name;
        # upstream disables memory-pattern optimization here — confirm.
        options.enable_mem_pattern = False
        return options

    def test_inference_default_pndm(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg" )
        init_image = init_image.resize((1_28, 1_28))
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx" , provider=self.gpu_provider , sess_options=self.gpu_options , )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt , image=init_image , guidance_scale=7.5 , num_inference_steps=10 , generator=generator , output_type="np" , )
        images = output.images
        image_slice = images[0, 2_55:2_58, 3_83:3_86, -1]

        assert images.shape == (1, 5_12, 5_12, 3)
        expected_slice = np.array([0.4883, 0.4947, 0.4980, 0.4975, 0.4982, 0.4980, 0.5000, 0.5006, 0.4972])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2E-2

    def test_inference_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg" )
        init_image = init_image.resize((1_28, 1_28))
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx" , subfolder="scheduler" )
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx" , scheduler=lms_scheduler , provider=self.gpu_provider , sess_options=self.gpu_options , )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt , image=init_image , guidance_scale=7.5 , num_inference_steps=20 , generator=generator , output_type="np" , )
        images = output.images
        image_slice = images[0, 2_55:2_58, 3_83:3_86, -1]

        assert images.shape == (1, 5_12, 5_12, 3)
        expected_slice = np.array(
            [0.50173753, 0.50223356, 0.502039, 0.50233036, 0.5023725, 0.5022601, 0.5018758, 0.50234085, 0.50241566])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2E-2
| 87 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_a : Optional[int] = {
"""configuration_pix2struct""": [
"""PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""Pix2StructConfig""",
"""Pix2StructTextConfig""",
"""Pix2StructVisionConfig""",
],
"""processing_pix2struct""": ["""Pix2StructProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : List[Any] = ["""Pix2StructImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : Dict = [
"""PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Pix2StructPreTrainedModel""",
"""Pix2StructForConditionalGeneration""",
"""Pix2StructVisionModel""",
"""Pix2StructTextModel""",
]
if TYPE_CHECKING:
from .configuration_pixastruct import (
PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP,
PixaStructConfig,
PixaStructTextConfig,
PixaStructVisionConfig,
)
from .processing_pixastruct import PixaStructProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_pixastruct import PixaStructImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pixastruct import (
PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST,
PixaStructForConditionalGeneration,
PixaStructPreTrainedModel,
PixaStructTextModel,
PixaStructVisionModel,
)
else:
import sys
_a : Optional[int] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 87 | 1 |
"""simple docstring"""
import argparse
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
_a : Any = logging.get_logger(__name__)
def a__ ( a : dict ):
    """Rename original GLPN checkpoint keys to the HuggingFace GLPN naming scheme.

    Args:
        a: original ``state_dict`` mapping checkpoint keys to tensors.

    Returns:
        OrderedDict with the same values under the renamed keys, preserving order.
    """
    new_state_dict = OrderedDict()
    for key, value in a.items():
        if key.startswith("module.encoder" ):
            key = key.replace("module.encoder" , "glpn.encoder" )
        if key.startswith("module.decoder" ):
            key = key.replace("module.decoder" , "decoder.stages" )
        if "patch_embed" in key:
            # replace for example patch_embed1 by patch_embeddings.0
            idx = key[key.find("patch_embed" ) + len("patch_embed" )]
            key = key.replace(f'patch_embed{idx}' , f'patch_embeddings.{int(idx)-1}' )
        if "norm" in key:
            key = key.replace("norm" , "layer_norm" )
        if "glpn.encoder.layer_norm" in key:
            # replace for example layer_norm1 by layer_norm.0
            idx = key[key.find("glpn.encoder.layer_norm" ) + len("glpn.encoder.layer_norm" )]
            key = key.replace(f'layer_norm{idx}' , f'layer_norm.{int(idx)-1}' )
        if "layer_norm1" in key:
            key = key.replace("layer_norm1" , "layer_norm_1" )
        if "layer_norm2" in key:
            key = key.replace("layer_norm2" , "layer_norm_2" )
        if "block" in key:
            # replace for example block1 by block.0
            idx = key[key.find("block" ) + len("block" )]
            key = key.replace(f'block{idx}' , f'block.{int(idx)-1}' )
        # NOTE: attn.q / attn.proj must be handled before the generic "attn" rule,
        # and their replacements no longer contain the substring "attn".
        if "attn.q" in key:
            key = key.replace("attn.q" , "attention.self.query" )
        if "attn.proj" in key:
            key = key.replace("attn.proj" , "attention.output.dense" )
        if "attn" in key:
            key = key.replace("attn" , "attention.self" )
        if "fc1" in key:
            key = key.replace("fc1" , "dense1" )
        if "fc2" in key:
            key = key.replace("fc2" , "dense2" )
        if "linear_pred" in key:
            key = key.replace("linear_pred" , "classifier" )
        if "linear_fuse" in key:
            key = key.replace("linear_fuse.conv" , "linear_fuse" )
            key = key.replace("linear_fuse.bn" , "batch_norm" )
        if "linear_c" in key:
            # replace for example linear_c4 by linear_c.3
            idx = key[key.find("linear_c" ) + len("linear_c" )]
            key = key.replace(f'linear_c{idx}' , f'linear_c.{int(idx)-1}' )
        if "bot_conv" in key:
            key = key.replace("bot_conv" , "0.convolution" )
        if "skip_conv1" in key:
            key = key.replace("skip_conv1" , "1.convolution" )
        if "skip_conv2" in key:
            key = key.replace("skip_conv2" , "2.convolution" )
        if "fusion1" in key:
            key = key.replace("fusion1" , "1.fusion" )
        if "fusion2" in key:
            key = key.replace("fusion2" , "2.fusion" )
        if "fusion3" in key:
            key = key.replace("fusion3" , "3.fusion" )
        if "fusion" in key and "conv" in key:
            key = key.replace("conv" , "convolutional_layer" )
        if key.startswith("module.last_layer_depth" ):
            key = key.replace("module.last_layer_depth" , "head.head" )
        new_state_dict[key] = value
    return new_state_dict
def a__ ( config , state_dict ):
    """Split each fused key/value ("kv") projection into separate key and value entries.

    Mutates ``state_dict`` in place: pops every
    ``glpn.encoder.block.{i}.{j}.attention.self.kv.{weight,bias}`` tensor and writes
    the first ``hidden_sizes[i]`` rows as the key projection and the remaining rows
    as the value projection.

    Args:
        config: GLPN config providing ``num_encoder_blocks``, ``depths`` and ``hidden_sizes``.
        state_dict: renamed checkpoint dict to rewrite in place.
    """
    for i in range(config.num_encoder_blocks ):
        for j in range(config.depths[i] ):
            # read in weights + bias of keys and values (which is a single matrix in the original implementation)
            kv_weight = state_dict.pop(f'glpn.encoder.block.{i}.{j}.attention.self.kv.weight' )
            kv_bias = state_dict.pop(f'glpn.encoder.block.{i}.{j}.attention.self.kv.bias' )
            # next, add keys and values (in that order) to the state dict
            state_dict[f'glpn.encoder.block.{i}.{j}.attention.self.key.weight'] = kv_weight[
                : config.hidden_sizes[i], :
            ]
            state_dict[f'glpn.encoder.block.{i}.{j}.attention.self.key.bias'] = kv_bias[: config.hidden_sizes[i]]
            state_dict[f'glpn.encoder.block.{i}.{j}.attention.self.value.weight'] = kv_weight[
                config.hidden_sizes[i] :, :
            ]
            state_dict[f'glpn.encoder.block.{i}.{j}.attention.self.value.bias'] = kv_bias[config.hidden_sizes[i] :]
def a__ ( ):
    """Download and return the standard COCO cats image used to sanity-check vision models."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    # stream=True lets PIL read directly from the response's raw file object.
    image = Image.open(requests.get(url , stream=True ).raw )
    return image
@torch.no_grad()
def a__ ( checkpoint_path , pytorch_dump_folder_path , push_to_hub=False , model_name=None ):
    """Convert an original GLPN checkpoint to the HuggingFace GLPN format.

    Args:
        checkpoint_path: path to the original PyTorch ``.pth`` checkpoint.
        pytorch_dump_folder_path: output folder (used as repo path when pushing).
        push_to_hub: whether to upload model and image processor to the Hub.
        model_name: Hub model name; when given, the converted model's depth
            prediction is verified against stored reference values.
    """
    config = GLPNConfig(hidden_sizes=[64, 128, 320, 512] , decoder_hidden_size=64 , depths=[3, 8, 27, 3] )
    # load image processor (only resize + rescale)
    image_processor = GLPNImageProcessor()
    # prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image , return_tensors="pt" ).pixel_values
    logger.info("Converting model..." )
    # load original state dict
    state_dict = torch.load(checkpoint_path , map_location=torch.device("cpu" ) )
    # rename keys
    state_dict = rename_keys(state_dict )
    # key and value matrices need special treatment
    read_in_k_v(config , state_dict )
    # create HuggingFace model and load state dict
    model = GLPNForDepthEstimation(config )
    model.load_state_dict(state_dict )
    model.eval()
    # forward pass
    outputs = model(pixel_values )
    predicted_depth = outputs.predicted_depth
    # verify output against stored reference slices
    if model_name is not None:
        if "nyu" in model_name:
            expected_slice = torch.tensor(
                [[4.4147, 4.0873, 4.0673], [3.7890, 3.2881, 3.1525], [3.7674, 3.5423, 3.4913]] )
        elif "kitti" in model_name:
            expected_slice = torch.tensor(
                [[3.4291, 2.7865, 2.5151], [3.2841, 2.7021, 2.3502], [3.1147, 2.4625, 2.2481]] )
        else:
            raise ValueError(f'Unknown model name: {model_name}' )
        expected_shape = torch.Size([1, 480, 640] )
        assert predicted_depth.shape == expected_shape
        assert torch.allclose(predicted_depth[0, :3, :3] , expected_slice , atol=1e-4 )
        print("Looks ok!" )
    # finally, push to hub if required
    if push_to_hub:
        logger.info("Pushing model and image processor to the hub..." )
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path , model_name ) , organization="nielsr" , commit_message="Add model" , use_temp_dir=True , )
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path , model_name ) , organization="nielsr" , commit_message="Add image processor" , use_temp_dir=True , )
if __name__ == "__main__":
_a : int = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_path""",
default=None,
type=str,
help="""Path to the original PyTorch checkpoint (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether to upload the model to the HuggingFace hub."""
)
parser.add_argument(
"""--model_name""",
default="""glpn-kitti""",
type=str,
help="""Name of the model in case you're pushing to the hub.""",
)
_a : Union[str, Any] = parser.parse_args()
convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
| 87 |
"""simple docstring"""
import argparse
import json
import subprocess
def a__ ( target_runners , token ):
    """Check GitHub Actions self-hosted runners and raise if any target runner is offline.

    Queries the transformers repo's runner API via curl, records offline runners in
    ``offline_runners.txt`` (so they can be reported on Slack), and raises ``ValueError``
    listing the offline runner names.

    Args:
        target_runners: iterable of runner names to check.
        token: GitHub token with ``actions:read`` permission.
    """
    offline_runners = []
    cmd = (
        f'curl -H "Accept: application/vnd.github+json" -H "Authorization: Bearer {token}"'
        " https://api.github.com/repos/huggingface/transformers/actions/runners"
    )
    output = subprocess.run(cmd , shell=True , stdout=subprocess.PIPE )
    o = output.stdout.decode("utf-8" )
    status = json.loads(o )
    runners = status["runners"]
    for runner in runners:
        if runner["name"] in target_runners:
            if runner["status"] == "offline":
                offline_runners.append(runner )
    # save the result so we can report them on Slack
    with open("offline_runners.txt" , "w" ) as fp:
        fp.write(json.dumps(offline_runners ) )
    if len(offline_runners ) > 0:
        failed = "\n".join([x["name"] for x in offline_runners] )
        raise ValueError(f'The following runners are offline:\n{failed}' )
if __name__ == "__main__":
def a__ ( a : Optional[int] ):
"""simple docstring"""
return values.split("," )
_a : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--target_runners""",
default=None,
type=list_str,
required=True,
help="""Comma-separated list of runners to check status.""",
)
parser.add_argument(
"""--token""", default=None, type=str, required=True, help="""A token that has actions:read permission."""
)
_a : List[str] = parser.parse_args()
get_runner_status(args.target_runners, args.token)
| 87 | 1 |
"""simple docstring"""
# Author: OMKAR PATHAK, Nwachukwu Chidiebere
# Use a Python dictionary to construct the graph.
from __future__ import annotations
from pprint import pformat
from typing import Generic, TypeVar
_a : int = TypeVar("""T""")
class _UpperCAmelCase ( Generic[T]):
def __init__( self , snake_case_ = True ):
_snake_case : dict[T, list[T]] = {} # dictionary of lists
_snake_case : Union[str, Any] = directed
def lowerCamelCase__ ( self , snake_case_ , snake_case_ ):
if not self.directed: # For undirected graphs
# if both source vertex and destination vertex are both present in the
# adjacency list, add destination vertex to source vertex list of adjacent
# vertices and add source vertex to destination vertex list of adjacent
# vertices.
if source_vertex in self.adj_list and destination_vertex in self.adj_list:
self.adj_list[source_vertex].append(snake_case_ )
self.adj_list[destination_vertex].append(snake_case_ )
# if only source vertex is present in adjacency list, add destination vertex
# to source vertex list of adjacent vertices, then create a new vertex with
# destination vertex as key and assign a list containing the source vertex
# as it's first adjacent vertex.
elif source_vertex in self.adj_list:
self.adj_list[source_vertex].append(snake_case_ )
_snake_case : List[str] = [source_vertex]
# if only destination vertex is present in adjacency list, add source vertex
# to destination vertex list of adjacent vertices, then create a new vertex
# with source vertex as key and assign a list containing the source vertex
# as it's first adjacent vertex.
elif destination_vertex in self.adj_list:
self.adj_list[destination_vertex].append(snake_case_ )
_snake_case : Any = [destination_vertex]
# if both source vertex and destination vertex are not present in adjacency
# list, create a new vertex with source vertex as key and assign a list
# containing the destination vertex as it's first adjacent vertex also
# create a new vertex with destination vertex as key and assign a list
# containing the source vertex as it's first adjacent vertex.
else:
_snake_case : Tuple = [destination_vertex]
_snake_case : Tuple = [source_vertex]
else: # For directed graphs
# if both source vertex and destination vertex are present in adjacency
# list, add destination vertex to source vertex list of adjacent vertices.
if source_vertex in self.adj_list and destination_vertex in self.adj_list:
self.adj_list[source_vertex].append(snake_case_ )
# if only source vertex is present in adjacency list, add destination
# vertex to source vertex list of adjacent vertices and create a new vertex
# with destination vertex as key, which has no adjacent vertex
elif source_vertex in self.adj_list:
self.adj_list[source_vertex].append(snake_case_ )
_snake_case : List[str] = []
# if only destination vertex is present in adjacency list, create a new
# vertex with source vertex as key and assign a list containing destination
# vertex as first adjacent vertex
elif destination_vertex in self.adj_list:
_snake_case : List[str] = [destination_vertex]
# if both source vertex and destination vertex are not present in adjacency
# list, create a new vertex with source vertex as key and a list containing
# destination vertex as it's first adjacent vertex. Then create a new vertex
# with destination vertex as key, which has no adjacent vertex
else:
_snake_case : Dict = [destination_vertex]
_snake_case : List[Any] = []
return self
def __repr__( self ):
return pformat(self.adj_list )
| 87 |
"""simple docstring"""
import unittest
from .lib import (
Matrix,
Vector,
axpy,
square_zero_matrix,
unit_basis_vector,
zero_vector,
)
class _UpperCAmelCase ( unittest.TestCase):
    """Unit tests for the Vector and Matrix helpers exposed by ``.lib``."""
    # NOTE(review): every test method below shares the name `lowerCamelCase__`, so
    # Python keeps only the last definition and unittest discovers none of them;
    # restoring distinct `test_*` names should be confirmed against the project.

    def lowerCamelCase__ ( self ):
        """component() returns the requested entry; an empty Vector is constructible."""
        x = Vector([1, 2, 3] )
        self.assertEqual(x.component(0 ) , 1 )
        self.assertEqual(x.component(2 ) , 3 )
        x = Vector()

    def lowerCamelCase__ ( self ):
        """str() renders the vector as a comma-separated tuple."""
        x = Vector([0, 0, 0, 0, 0, 1] )
        self.assertEqual(str(x ) , "(0,0,0,0,0,1)" )

    def lowerCamelCase__ ( self ):
        """len() reports the number of components."""
        x = Vector([1, 2, 3, 4] )
        self.assertEqual(len(x ) , 4 )

    def lowerCamelCase__ ( self ):
        """euclidean_length() matches the 2-norm for several vectors."""
        x = Vector([1, 2] )
        y = Vector([1, 2, 3, 4, 5] )
        z = Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0] )
        w = Vector([1, -1, 1, -1, 2, -3, 4, -5] )
        self.assertAlmostEqual(x.euclidean_length() , 2.236 , 3 )
        self.assertAlmostEqual(y.euclidean_length() , 7.416 , 3 )
        self.assertEqual(z.euclidean_length() , 0 )
        self.assertAlmostEqual(w.euclidean_length() , 7.616 , 3 )

    def lowerCamelCase__ ( self ):
        """Component-wise vector addition."""
        x = Vector([1, 2, 3] )
        y = Vector([1, 1, 1] )
        self.assertEqual((x + y).component(0 ) , 2 )
        self.assertEqual((x + y).component(1 ) , 3 )
        self.assertEqual((x + y).component(2 ) , 4 )

    def lowerCamelCase__ ( self ):
        """Component-wise vector subtraction."""
        x = Vector([1, 2, 3] )
        y = Vector([1, 1, 1] )
        self.assertEqual((x - y).component(0 ) , 0 )
        self.assertEqual((x - y).component(1 ) , 1 )
        self.assertEqual((x - y).component(2 ) , 2 )

    def lowerCamelCase__ ( self ):
        """Scalar multiplication and dot product via `*`."""
        x = Vector([1, 2, 3] )
        a = Vector([2, -1, 4] )  # for test of dot product
        b = Vector([1, -2, -1] )
        self.assertEqual(str(x * 3.0 ) , "(3.0,6.0,9.0)" )
        self.assertEqual((a * b) , 0 )

    def lowerCamelCase__ ( self ):
        """zero_vector() produces the requested number of zero components."""
        self.assertEqual(str(zero_vector(10 ) ).count("0" ) , 10 )

    def lowerCamelCase__ ( self ):
        """unit_basis_vector() places a single 1 at the requested index."""
        self.assertEqual(str(unit_basis_vector(3 , 1 ) ) , "(0,1,0)" )

    def lowerCamelCase__ ( self ):
        """axpy(s, x, y) computes s*x + y."""
        x = Vector([1, 2, 3] )
        y = Vector([1, 0, 1] )
        self.assertEqual(str(axpy(2 , x , y ) ) , "(3,4,7)" )

    def lowerCamelCase__ ( self ):
        """copy() yields a vector with the same string representation."""
        x = Vector([1, 0, 0, 0, 0, 0] )
        y = x.copy()
        self.assertEqual(str(x ) , str(y ) )

    def lowerCamelCase__ ( self ):
        """change_component() overwrites single vector entries in place."""
        x = Vector([1, 0, 0] )
        x.change_component(0 , 0 )
        x.change_component(1 , 1 )
        self.assertEqual(str(x ) , "(0,1,0)" )

    def lowerCamelCase__ ( self ):
        """str(Matrix) renders rows between pipes."""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
        self.assertEqual("|1,2,3|\n|2,4,5|\n|6,7,8|\n" , str(a ) )

    def lowerCamelCase__ ( self ):
        """minor() agrees with the precomputed minors of a 3x3 matrix."""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
        minors = [[-3, -14, -10], [-5, -10, -5], [-2, -1, 0]]
        for x in range(a.height() ):
            for y in range(a.width() ):
                self.assertEqual(minors[x][y] , a.minor(x , y ) )

    def lowerCamelCase__ ( self ):
        """cofactor() agrees with the precomputed cofactors of a 3x3 matrix."""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
        cofactors = [[-3, 14, -10], [5, -10, 5], [-2, 1, 0]]
        for x in range(a.height() ):
            for y in range(a.width() ):
                self.assertEqual(cofactors[x][y] , a.cofactor(x , y ) )

    def lowerCamelCase__ ( self ):
        """determinant() of the sample 3x3 matrix is -5."""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
        self.assertEqual(-5 , a.determinant() )

    def lowerCamelCase__ ( self ):
        """Matrix-vector product and matrix-scalar multiplication."""
        a = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]] , 3 , 3 )
        x = Vector([1, 2, 3] )
        self.assertEqual("(14,32,50)" , str(a * x ) )
        self.assertEqual("|2,4,6|\n|8,10,12|\n|14,16,18|\n" , str(a * 2 ) )

    def lowerCamelCase__ ( self ):
        """change_component() overwrites a single matrix entry."""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
        a.change_component(0 , 2 , 5 )
        self.assertEqual("|1,2,5|\n|2,4,5|\n|6,7,8|\n" , str(a ) )

    def lowerCamelCase__ ( self ):
        """component() reads back a single matrix entry."""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
        self.assertEqual(7 , a.component(2 , 1 ) , 0.01 )

    def lowerCamelCase__ ( self ):
        """Component-wise matrix addition."""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
        b = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 )
        self.assertEqual("|2,4,10|\n|4,8,10|\n|12,14,18|\n" , str(a + b ) )

    def lowerCamelCase__ ( self ):
        """Component-wise matrix subtraction."""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
        b = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 )
        self.assertEqual("|0,0,-4|\n|0,0,0|\n|0,0,-2|\n" , str(a - b ) )

    def lowerCamelCase__ ( self ):
        """square_zero_matrix() builds an all-zero 5x5 matrix."""
        self.assertEqual(
            "|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n" , str(square_zero_matrix(5 ) ) , )


if __name__ == "__main__":
    # Run the suite when executed directly.
    unittest.main()
| 87 | 1 |
"""simple docstring"""
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.models import Sequential
if __name__ == "__main__":
_a : Tuple = pd.read_csv("""sample_data.csv""", header=None)
_a : List[Any] = df.shape[:1][0]
# If you're using some other dataset input the target column
_a : int = df.iloc[:, 1:2]
_a : List[Any] = actual_data.values.reshape(len_data, 1)
_a : Optional[Any] = MinMaxScaler().fit_transform(actual_data)
_a : Optional[Any] = 10
_a : int = 5
_a : List[Any] = 20
_a : Dict = len_data - periods * look_back
_a : Any = actual_data[:division]
_a : str = actual_data[division - look_back :]
_a, _a : Any = [], []
_a, _a : Any = [], []
for i in range(0, len(train_data) - forward_days - look_back + 1):
train_x.append(train_data[i : i + look_back])
train_y.append(train_data[i + look_back : i + look_back + forward_days])
for i in range(0, len(test_data) - forward_days - look_back + 1):
test_x.append(test_data[i : i + look_back])
test_y.append(test_data[i + look_back : i + look_back + forward_days])
_a : Dict = np.array(train_x)
_a : Tuple = np.array(test_x)
_a : str = np.array([list(i.ravel()) for i in train_y])
_a : Optional[int] = np.array([list(i.ravel()) for i in test_y])
_a : Optional[int] = Sequential()
model.add(LSTM(128, input_shape=(look_back, 1), return_sequences=True))
model.add(LSTM(64, input_shape=(128, 1)))
model.add(Dense(forward_days))
model.compile(loss="""mean_squared_error""", optimizer="""adam""")
_a : List[str] = model.fit(
x_train, y_train, epochs=150, verbose=1, shuffle=True, batch_size=4
)
_a : Tuple = model.predict(x_test)
| 87 |
"""simple docstring"""
from __future__ import annotations
from collections import namedtuple
def a__ ( voltage : float , current : float , power : float ):
    """Apply the electrical power formula P = V * I.

    Exactly one of the three arguments must be 0; that quantity is computed
    from the other two.

    Args:
        voltage: voltage in volts (0 to solve for it).
        current: current in amperes (0 to solve for it).
        power: power in watts (0 to solve for it); must be non-negative.

    Returns:
        A ``result`` namedtuple ``(name, value)`` naming the solved quantity.

    Raises:
        ValueError: if not exactly one argument is 0, or if power is negative.
    """
    result = namedtuple("result" , "name value" )
    if (voltage, current, power).count(0 ) != 1:
        raise ValueError("Only one argument must be 0" )
    elif power < 0:
        raise ValueError(
            "Power cannot be negative in any electrical/electronics system" )
    elif voltage == 0:
        return result("voltage" , power / current )
    elif current == 0:
        return result("current" , power / voltage )
    elif power == 0:
        # power is rounded to 2 decimals, mirroring real-world meter precision
        return result("power" , float(round(abs(voltage * current ) , 2 ) ) )
    else:
        raise ValueError("Exactly one argument must be 0" )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 87 | 1 |
"""simple docstring"""
import unittest
from transformers import AutoTokenizer, FalconConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
)
class _UpperCAmelCase :
    """Helper that builds small Falcon configs/inputs and checks model outputs.

    Used by the test suite below to exercise FalconModel and its task heads with
    tiny, fast configurations.
    """
    # NOTE(review): the helper methods below all share the name `lowerCamelCase__`,
    # so only the last definition survives on the class; restoring distinct method
    # names should be confirmed against the intended callers.

    def __init__(
        self,
        parent,
        batch_size=3,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=5_12,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def lowerCamelCase__ ( self ):
        """Build random input ids/masks/labels plus a small config."""
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def lowerCamelCase__ ( self ):
        """Return a tiny FalconConfig matching the tester's dimensions."""
        return FalconConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , pad_token_id=1 , new_decoder_architecture=True , )

    def lowerCamelCase__ ( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        """Forward the base model and check the hidden-state shape."""
        model = FalconModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def lowerCamelCase__ ( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , encoder_hidden_states , encoder_attention_mask , ):
        """Forward the base model as a cross-attending decoder."""
        config.add_cross_attention = True
        model = FalconModel(config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_attention_mask , )
        result = model(
            input_ids , attention_mask=input_mask , encoder_hidden_states=encoder_hidden_states , )
        result = model(input_ids , attention_mask=input_mask )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def lowerCamelCase__ ( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , encoder_hidden_states , encoder_attention_mask , ):
        """Forward the causal-LM head and check the logits shape."""
        model = FalconForCausalLM(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

    def lowerCamelCase__ ( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , encoder_hidden_states , encoder_attention_mask , ):
        """Check that generating with a KV cache matches the no-cache forward pass."""
        config.is_decoder = True
        config.add_cross_attention = True
        model = FalconForCausalLM(config=config )
        model.to(torch_device )
        model.eval()
        # first forward pass
        outputs = model(
            input_ids , attention_mask=input_mask , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_attention_mask , use_cache=True , )
        past_key_values = outputs.past_key_values
        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3) , config.vocab_size )
        next_mask = ids_tensor((self.batch_size, 3) , vocab_size=2 )
        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens] , dim=-1 )
        next_attention_mask = torch.cat([input_mask, next_mask] , dim=-1 )
        output_from_no_past = model(
            next_input_ids , attention_mask=next_attention_mask , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_attention_mask , output_hidden_states=True , )["hidden_states"][0]
        output_from_past = model(
            next_tokens , attention_mask=next_attention_mask , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_attention_mask , past_key_values=past_key_values , output_hidden_states=True , )["hidden_states"][0]
        # select random slice
        random_slice_idx = ids_tensor((1,) , output_from_past.shape[-1] ).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_no_past_slice , output_from_past_slice , atol=1E-3 ) )

    def lowerCamelCase__ ( self ):
        """Return (config, inputs_dict) in the format the common tests expect."""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class _UpperCAmelCase ( _snake_case , _snake_case , _snake_case , unittest.TestCase):
__lowercase : str = (
(
FalconModel,
FalconForCausalLM,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconForQuestionAnswering,
)
if is_torch_available()
else ()
)
__lowercase : Dict = (FalconForCausalLM,) if is_torch_available() else ()
__lowercase : str = (
{
"""feature-extraction""": FalconModel,
"""text-classification""": FalconForSequenceClassification,
"""text-generation""": FalconForCausalLM,
"""question-answering""": FalconForQuestionAnswering,
"""token-classification""": FalconForTokenClassification,
"""zero-shot""": FalconForSequenceClassification,
}
if is_torch_available()
else {}
)
__lowercase : Optional[Any] = False
__lowercase : List[str] = False
def lowerCamelCase__ ( self ):
_snake_case : str = FalconModelTester(self )
_snake_case : Optional[int] = ConfigTester(self , config_class=snake_case_ , hidden_size=37 )
def lowerCamelCase__ ( self ):
self.config_tester.run_common_tests()
def lowerCamelCase__ ( self ):
_snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case_ )
def lowerCamelCase__ ( self ):
_snake_case , *_snake_case : Any = self.model_tester.prepare_config_and_inputs()
for alibi in [True, False]:
_snake_case : Optional[int] = alibi
self.model_tester.create_and_check_model(snake_case_ , *snake_case_ )
def lowerCamelCase__ ( self ):
_snake_case , _snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
_snake_case : Dict = 3
_snake_case : List[str] = input_dict["input_ids"]
_snake_case : Any = input_ids.ne(1 ).to(snake_case_ )
_snake_case : Dict = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
_snake_case : int = FalconForSequenceClassification(snake_case_ )
model.to(snake_case_ )
model.eval()
_snake_case : Any = model(snake_case_ , attention_mask=snake_case_ , labels=snake_case_ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def lowerCamelCase__ ( self ):
_snake_case , _snake_case : int = self.model_tester.prepare_config_and_inputs_for_common()
_snake_case : Optional[Any] = 3
_snake_case : int = "single_label_classification"
_snake_case : Any = input_dict["input_ids"]
_snake_case : List[Any] = input_ids.ne(1 ).to(snake_case_ )
_snake_case : Union[str, Any] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
_snake_case : int = FalconForSequenceClassification(snake_case_ )
model.to(snake_case_ )
model.eval()
_snake_case : Optional[Any] = model(snake_case_ , attention_mask=snake_case_ , labels=snake_case_ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def lowerCamelCase__ ( self ):
_snake_case , _snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
_snake_case : Optional[int] = input_dict["input_ids"]
_snake_case : Optional[Any] = FalconForCausalLM(snake_case_ )
model.to(snake_case_ )
model.eval()
_snake_case : str = model(snake_case_ , use_cache=snake_case_ )
_snake_case : Any = input_ids.shape[0]
_snake_case : Union[str, Any] = model._convert_to_rw_cache(result.past_key_values )
_snake_case : Optional[Any] = model._convert_cache_to_standard_format(snake_case_ , snake_case_ )
for layer in range(len(snake_case_ ) ):
for tensor_idx in range(2 ):
self.assertTrue(rw_cache[layer][tensor_idx].ndim == 3 )
self.assertTrue(result.past_key_values[layer][tensor_idx].ndim == 4 )
self.assertTrue(
torch.all(result.past_key_values[layer][tensor_idx] == standard_cache[layer][tensor_idx] ) )
def lowerCamelCase__ ( self ):
    # Exercises FalconForSequenceClassification with num_labels=3 and
    # problem_type "multi_label_classification" (float multi-hot labels) and
    # asserts the logits have shape (batch_size, num_labels).
    # NOTE(review): locals are bound to `_snake_case` while later lines read
    # names that are never defined here (`input_dict`, `input_ids`, `config`,
    # `model`, `result`, `snake_case_`) -- mechanical renaming damage; verify
    # against the upstream Falcon test suite.
    _snake_case , _snake_case : Any = self.model_tester.prepare_config_and_inputs_for_common()
    _snake_case : Tuple = 3
    _snake_case : Optional[Any] = "multi_label_classification"
    _snake_case : Optional[int] = input_dict["input_ids"]
    _snake_case : Dict = input_ids.ne(1 ).to(snake_case_ )
    _snake_case : Optional[int] = ids_tensor(
        [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
    _snake_case : Any = FalconForSequenceClassification(snake_case_ )
    model.to(snake_case_ )
    model.eval()
    _snake_case : int = model(snake_case_ , attention_mask=snake_case_ , labels=snake_case_ )
    self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def lowerCamelCase__ ( self ):
    # Falcon can have different numbers of KV-heads than the number of query heads, so we need
    # to override this test to use the right head counts.
    # Verifies that the returned `past_key_values` contains one (key, value)
    # pair per decoder layer, each shaped (batch, heads, seq_len, head_dim).
    # NOTE(review): several reads (`inputs`, `outputs`, `config`, `embed_dim`,
    # `num_attention_heads`, `past_kv`, `batch_size`, `seq_length`,
    # `per_head_embed_dim`, `snake_case_`) do not match the `_snake_case`
    # bindings above them -- mechanical renaming damage; verify upstream.
    for model_class in self.all_generative_model_classes:
        _snake_case , _snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
        # If it doesn't support cache, pass the test
        if not hasattr(snake_case_ , "use_cache" ):
            return
        _snake_case : Dict = model_class(snake_case_ ).to(snake_case_ )
        if "use_cache" not in inputs:
            _snake_case : str = True
        _snake_case : Optional[Any] = model(**snake_case_ )
        # If "past_key_values" is not returned, pass the test (e.g. RWKV uses a different cache name and format)
        if "past_key_values" not in outputs:
            return
        # Number of decoder layers: first config attribute that exists wins.
        _snake_case : str = (
            getattr(snake_case_ , "decoder_layers" , snake_case_ )
            or getattr(snake_case_ , "num_decoder_layers" , snake_case_ )
            or config.num_hidden_layers
        )
        _snake_case : Tuple = getattr(snake_case_ , "num_kv_heads" , config.num_attention_heads )
        _snake_case : Dict = getattr(snake_case_ , "d_model" , config.hidden_size )
        _snake_case : Dict = embed_dim // num_attention_heads
        _snake_case : str = outputs["past_key_values"]
        self.assertEqual(len(snake_case_ ) , snake_case_ )
        _snake_case , _snake_case : str = inputs["input_ids"].shape
        for i in range(snake_case_ ):
            if config.new_decoder_architecture:
                # New decoder architecture keeps the full query-head count.
                _snake_case : Any = config.num_attention_heads
            elif config.multi_query:
                # Multi-query attention shares a single KV head.
                _snake_case : str = 1
            self.assertEqual(len(past_kv[0] ) , 2 ) # K V for the decoder = 2
            self.assertEqual(
                past_kv[i][0].shape , (batch_size, num_attention_heads, seq_length, per_head_embed_dim) )
            self.assertEqual(
                past_kv[i][1].shape , (batch_size, num_attention_heads, seq_length, per_head_embed_dim) )
@require_torch
class _UpperCAmelCase ( unittest.TestCase):
    """Slow integration tests that run real Falcon checkpoints end to end.

    NOTE(review): throughout this class the free name `snake_case_` is read but
    never defined (it appears to stand in for the device, boolean flags, model
    inputs, etc. after a mechanical rename), and all test methods share the
    name `lowerCamelCase__`, so later definitions shadow earlier ones -- verify
    against the upstream Falcon tests.
    """

    @slow
    def lowerCamelCase__ ( self ):
        # Greedy generation from falcon-rw-1b must reproduce a pinned string.
        _snake_case : List[str] = AutoTokenizer.from_pretrained("Rocketknight1/falcon-rw-1b" )
        _snake_case : List[str] = FalconForCausalLM.from_pretrained("Rocketknight1/falcon-rw-1b" )
        model.eval()
        model.to(snake_case_ )
        _snake_case : str = tokenizer("My favorite food is" , return_tensors="pt" ).to(snake_case_ )
        _snake_case : Union[str, Any] = (
            "My favorite food is pizza. I love it so much that I have a pizza party every year for my birthday."
        )
        _snake_case : Dict = model.generate(**snake_case_ , do_sample=snake_case_ , max_new_tokens=19 )
        _snake_case : List[str] = tokenizer.batch_decode(snake_case_ )[0]
        self.assertEqual(snake_case_ , snake_case_ )

    @slow
    def lowerCamelCase__ ( self ):
        # The big models are way too big for the CI, so we use tiny random models that resemble their
        # architectures but with much smaller and fewer layers
        for repo in ["Rocketknight1/tiny-random-falcon-7b", "Rocketknight1/tiny-random-falcon-40b"]:
            _snake_case : Optional[int] = AutoTokenizer.from_pretrained(snake_case_ )
            _snake_case : List[Any] = FalconForCausalLM.from_pretrained(snake_case_ )
            model.eval()
            model.to(snake_case_ )
            _snake_case : List[str] = tokenizer("My favorite food is" , return_tensors="pt" ).to(snake_case_ )
            # We just test that these run without errors - the models are randomly initialized
            # and so the actual text outputs will be garbage
            model.generate(**snake_case_ , do_sample=snake_case_ , max_new_tokens=4 )
            model.generate(**snake_case_ , do_sample=snake_case_ , max_new_tokens=4 )
            model.generate(**snake_case_ , num_beams=2 , max_new_tokens=4 )

    @slow
    def lowerCamelCase__ ( self ):
        # The big models are way too big for the CI, so we use tiny random models that resemble their
        # architectures but with much smaller and fewer layers
        # Generation with and without the KV cache must produce identical tokens.
        with torch.no_grad():
            for repo in [
                "Rocketknight1/falcon-rw-1b",
                "Rocketknight1/tiny-random-falcon-7b",
                "Rocketknight1/tiny-random-falcon-40b",
            ]:
                _snake_case : Any = AutoTokenizer.from_pretrained(snake_case_ )
                _snake_case : Tuple = FalconForCausalLM.from_pretrained(snake_case_ )
                model.eval()
                model.to(device=snake_case_ )
                _snake_case : List[Any] = tokenizer("My favorite food is" , return_tensors="pt" ).to(snake_case_ )
                # Test results are the same with and without cache
                _snake_case : Dict = model.generate(**snake_case_ , do_sample=snake_case_ , max_new_tokens=20 , use_cache=snake_case_ )
                _snake_case : Optional[int] = model.generate(**snake_case_ , do_sample=snake_case_ , max_new_tokens=20 , use_cache=snake_case_ )
                self.assertTrue((outputs_cache - outputs_no_cache).sum().item() == 0 )
| 87 |
"""simple docstring"""
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DPMSolverMultistepScheduler,
TextToVideoSDPipeline,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class _UpperCAmelCase ( _snake_case , unittest.TestCase):
    """Fast tests for TextToVideoSDPipeline using tiny randomly-initialised
    components.

    NOTE(review): the base-class reference `_snake_case` (presumably the
    PipelineTesterMixin imported above) and the free name `snake_case_` used
    throughout are artefacts of a mechanical rename, and every test method
    shares the name `lowerCamelCase__`, so later definitions shadow earlier
    ones -- verify against the upstream diffusers test suite.
    """

    # Class attributes drive the PipelineTesterMixin; original attribute names
    # (pipeline_class, params, batch_params, required_optional_params) were
    # mangled to `__lowercase`.
    __lowercase : Any = TextToVideoSDPipeline
    __lowercase : str = TEXT_TO_IMAGE_PARAMS
    __lowercase : int = TEXT_TO_IMAGE_BATCH_PARAMS
    # No `output_type`.
    __lowercase : Optional[int] = frozenset(
        [
            """num_inference_steps""",
            """generator""",
            """latents""",
            """return_dict""",
            """callback""",
            """callback_steps""",
        ])

    def lowerCamelCase__ ( self ):
        # Builds the tiny UNet3D / DDIM / VAE / CLIP components used by the
        # fast tests; seeds torch before each init for determinism.
        torch.manual_seed(0 )
        _snake_case : str = UNetaDConditionModel(
            block_out_channels=(32, 64, 64, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D") , up_block_types=("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D") , cross_attention_dim=32 , attention_head_dim=4 , )
        _snake_case : List[Any] = DDIMScheduler(
            beta_start=0.00085 , beta_end=0.012 , beta_schedule="scaled_linear" , clip_sample=snake_case_ , set_alpha_to_one=snake_case_ , )
        torch.manual_seed(0 )
        _snake_case : Union[str, Any] = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , sample_size=1_28 , )
        torch.manual_seed(0 )
        _snake_case : Optional[Any] = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act="gelu" , projection_dim=5_12 , )
        _snake_case : Tuple = CLIPTextModel(snake_case_ )
        _snake_case : Optional[int] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
        _snake_case : Any = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }
        return components

    def lowerCamelCase__ ( self , snake_case_ , snake_case_=0 ):
        # Deterministic pipeline inputs; MPS requires a CPU-side generator.
        if str(snake_case_ ).startswith("mps" ):
            _snake_case : str = torch.manual_seed(snake_case_ )
        else:
            _snake_case : Union[str, Any] = torch.Generator(device=snake_case_ ).manual_seed(snake_case_ )
        _snake_case : str = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "pt",
        }
        return inputs

    def lowerCamelCase__ ( self ):
        # Runs the pipeline for two steps on CPU and pins a 3x3 slice of the
        # first output frame against hard-coded expected values.
        _snake_case : int = "cpu" # ensure determinism for the device-dependent torch.Generator
        _snake_case : Optional[Any] = self.get_dummy_components()
        _snake_case : Tuple = TextToVideoSDPipeline(**snake_case_ )
        _snake_case : List[str] = sd_pipe.to(snake_case_ )
        sd_pipe.set_progress_bar_config(disable=snake_case_ )
        _snake_case : int = self.get_dummy_inputs(snake_case_ )
        _snake_case : Union[str, Any] = "np"
        _snake_case : Dict = sd_pipe(**snake_case_ ).frames
        _snake_case : Any = frames[0][-3:, -3:, -1]
        assert frames[0].shape == (64, 64, 3)
        _snake_case : Dict = np.array([158.0, 160.0, 153.0, 125.0, 100.0, 121.0, 111.0, 93.0, 113.0] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2

    def lowerCamelCase__ ( self ):
        # Delegates to the mixin's attention-slicing equivalence check.
        self._test_attention_slicing_forward_pass(test_mean_pixel_difference=snake_case_ , expected_max_diff=3E-3 )

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
    def lowerCamelCase__ ( self ):
        # xFormers attention path must match the default path within tolerance.
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=snake_case_ , expected_max_diff=1E-2 )

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline." )
    def lowerCamelCase__ ( self ):
        pass

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline." )
    def lowerCamelCase__ ( self ):
        pass

    @unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline." )
    def lowerCamelCase__ ( self ):
        pass

    def lowerCamelCase__ ( self ):
        # Re-exposes the mixin's progress-bar test unchanged.
        return super().test_progress_bar()
@slow
@skip_mps
class _UpperCAmelCase ( unittest.TestCase):
    """Slow integration tests running the full damo-vilab text-to-video model
    and comparing generated frames against reference videos on the Hub.

    NOTE(review): `snake_case_` is an undefined free name left by a mechanical
    rename, and both test methods share the name `lowerCamelCase__` (the second
    shadows the first) -- verify against the upstream diffusers tests.
    """

    def lowerCamelCase__ ( self ):
        # 25-step generation with a DPM-Solver scheduler vs. a reference video.
        _snake_case : List[Any] = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy" )
        _snake_case : int = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b" )
        _snake_case : str = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
        _snake_case : Tuple = pipe.to("cuda" )
        _snake_case : List[Any] = "Spiderman is surfing"
        _snake_case : Optional[int] = torch.Generator(device="cpu" ).manual_seed(0 )
        _snake_case : int = pipe(snake_case_ , generator=snake_case_ , num_inference_steps=25 , output_type="pt" ).frames
        _snake_case : int = video_frames.cpu().numpy()
        assert np.abs(expected_video - video ).mean() < 5E-2

    def lowerCamelCase__ ( self ):
        # 2-step generation with the default scheduler vs. a reference video.
        _snake_case : Any = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy" )
        _snake_case : str = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b" )
        _snake_case : int = pipe.to("cuda" )
        _snake_case : Any = "Spiderman is surfing"
        _snake_case : str = torch.Generator(device="cpu" ).manual_seed(0 )
        _snake_case : Any = pipe(snake_case_ , generator=snake_case_ , num_inference_steps=2 , output_type="pt" ).frames
        _snake_case : Optional[int] = video_frames.cpu().numpy()
        assert np.abs(expected_video - video ).mean() < 5E-2
| 87 | 1 |
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
_a : Optional[int] = """Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine"""
def a__ ( ):
    """Prompt the user for a compute environment and collect the matching config.

    Returns:
        The configuration object produced by ``get_sagemaker_input`` (for AWS
        SageMaker) or ``get_cluster_input`` (for this machine / clusters).
    """
    # Fix: the original bound both locals to `_snake_case` and then read the
    # undefined names `compute_environment` and `config`, which raises
    # NameError at runtime.  Restore coherent local names.
    compute_environment = _ask_options(
        "In which compute environment are you running?",
        ["This machine", "AWS (Amazon SageMaker)"],
        _convert_compute_environment,
    )
    if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
        config = get_sagemaker_input()
    else:
        config = get_cluster_input()
    return config
def a__ ( a : int=None ):
    """Build the argument parser for the ``accelerate config`` command.

    Args:
        a: Optional argparse sub-parsers object.  When given, the "config"
            command is registered on it; otherwise a standalone parser is built.

    Returns:
        The configured ``argparse.ArgumentParser``.
    """
    # Fix: the original read the undefined names `subparsers` and `parser`
    # (the parameter had been renamed to `a` without updating the body) and
    # passed the parameter itself as `description=` and `default=` instead of
    # the module-level description string and None.
    if a is not None:
        parser = a.add_parser("config", description=_a)
    else:
        parser = argparse.ArgumentParser("Accelerate config command", description=_a)
    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )
    if a is not None:
        # NOTE(review): upstream sets `func` to the config-command callable; in
        # this file that function's name was mangled -- verify the target
        # resolves at runtime.
        parser.set_defaults(func=config_command)
    return parser
def a__ ( a : Optional[Any] ):
    """Run the interactive config flow and save the result to disk.

    Args:
        a: Parsed CLI namespace; ``a.config_file`` optionally overrides the
            destination path (otherwise the default YAML file in the cache
            directory is used).
    """
    # Fix: the original bound its locals to `_snake_case` and then read the
    # undefined names `args`, `config` and `config_file`, and passed the
    # namespace object itself to `os.path.isdir` / `to_*_file`.
    # NOTE(review): `get_user_input` is not defined under that name in this
    # module (the interactive prompt function above was renamed) -- verify.
    config = get_user_input()
    if a.config_file is not None:
        config_file = a.config_file
    else:
        # Ensure the cache directory exists before writing the default file.
        if not os.path.isdir(cache_dir):
            os.makedirs(cache_dir)
        config_file = default_yaml_config_file
    # Serialise based on the destination extension.
    if config_file.endswith(".json"):
        config.to_json_file(config_file)
    else:
        config.to_yaml_file(config_file)
    print(f'accelerate configuration saved at {config_file}')
def a__ ( ):
    """CLI entry point: parse the command line and run the config command."""
    # Fix: the original discarded the parser and namespace into `_snake_case`
    # and then read the undefined names `parser` and `a`.
    # NOTE(review): `config_command_parser` / `config_command` are not defined
    # under those names in this module (they were renamed) -- verify targets.
    parser = config_command_parser()
    args = parser.parse_args()
    config_command(args)
if __name__ == "__main__":
    # NOTE(review): `main` is not defined in this module (the entry-point
    # function above was renamed), so executing this file as a script will
    # raise NameError -- verify the intended target.
    main()
| 87 |
"""simple docstring"""
from typing import List, Optional
import numpy as np
from ...processing_utils import ProcessorMixin
from ...utils import to_numpy
class _UpperCAmelCase ( _snake_case):
    """Audio+text processor pairing an EncodecFeatureExtractor with a T5
    tokenizer (a MusicGen-style processor).

    NOTE(review): the base-class reference `_snake_case` (presumably
    ProcessorMixin, imported above) and the free name `snake_case_` are
    artefacts of a mechanical rename; many reads below (`audio`, `text`,
    `inputs`, `audio_inputs`, `audio_values`, `padding_mask`, `seq_len`,
    `difference`, `sliced_audio`) do not match the `_snake_case` bindings --
    verify against the upstream processor implementation.
    """

    # Presumably feature_extractor_class / tokenizer_class; the attribute
    # names were mangled to `__lowercase` (later one shadows the earlier).
    __lowercase : int = """EncodecFeatureExtractor"""
    __lowercase : str = ("""T5Tokenizer""", """T5TokenizerFast""")

    def __init__( self , snake_case_ , snake_case_ ):
        # Registers feature extractor and tokenizer through the mixin; the
        # "current processor" defaults to the feature extractor, and the flag
        # gates the backward-compatible __call__ forwarding below.
        super().__init__(snake_case_ , snake_case_ )
        _snake_case : Dict = self.feature_extractor
        _snake_case : Any = False

    def lowerCamelCase__ ( self , snake_case_=None , snake_case_=None , snake_case_=True ):
        # Thin forward to the tokenizer's decoder-prompt-ids helper.
        return self.tokenizer.get_decoder_prompt_ids(task=snake_case_ , language=snake_case_ , no_timestamps=snake_case_ )

    def __call__( self , *snake_case_ , **snake_case_ ):
        # Dispatches to the tokenizer (text) and/or feature extractor (audio)
        # and merges both results into a single encoding when both are given.
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*snake_case_ , **snake_case_ )
        _snake_case : str = kwargs.pop("audio" , snake_case_ )
        _snake_case : Optional[int] = kwargs.pop("sampling_rate" , snake_case_ )
        _snake_case : Optional[Any] = kwargs.pop("text" , snake_case_ )
        if len(snake_case_ ) > 0:
            # First positional argument is treated as audio.
            _snake_case : Any = args[0]
            _snake_case : Union[str, Any] = args[1:]
        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process." )
        if text is not None:
            _snake_case : Any = self.tokenizer(snake_case_ , **snake_case_ )
        if audio is not None:
            _snake_case : Any = self.feature_extractor(snake_case_ , *snake_case_ , sampling_rate=snake_case_ , **snake_case_ )
        if audio is None:
            return inputs
        elif text is None:
            return audio_inputs
        else:
            # Merge audio values (and padding mask, when present) into the
            # tokenizer output.
            _snake_case : str = audio_inputs["input_values"]
            if "padding_mask" in audio_inputs:
                _snake_case : List[str] = audio_inputs["padding_mask"]
            return inputs

    def lowerCamelCase__ ( self , *snake_case_ , **snake_case_ ):
        # batch_decode: decodes audio batches when `audio` is supplied,
        # otherwise forwards to the tokenizer's batch_decode.
        _snake_case : Tuple = kwargs.pop("audio" , snake_case_ )
        _snake_case : List[str] = kwargs.pop("padding_mask" , snake_case_ )
        if len(snake_case_ ) > 0:
            _snake_case : Tuple = args[0]
            _snake_case : Dict = args[1:]
        if audio_values is not None:
            return self._decode_audio(snake_case_ , padding_mask=snake_case_ )
        else:
            return self.tokenizer.batch_decode(*snake_case_ , **snake_case_ )

    def lowerCamelCase__ ( self , *snake_case_ , **snake_case_ ):
        # Plain text decode, forwarded to the tokenizer.
        return self.tokenizer.decode(*snake_case_ , **snake_case_ )

    def lowerCamelCase__ ( self , snake_case_ , snake_case_ = None ):
        # Strips padding from each decoded audio array using the padding mask,
        # returning a list of per-example arrays; without a mask, returns the
        # batch unsliced.
        _snake_case : Optional[int] = to_numpy(snake_case_ )
        _snake_case , _snake_case , _snake_case : Tuple = audio_values.shape
        if padding_mask is None:
            return list(snake_case_ )
        _snake_case : Optional[int] = to_numpy(snake_case_ )
        # match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding**
        # token (so that the generated audio values are **not** treated as padded tokens)
        _snake_case : Any = seq_len - padding_mask.shape[-1]
        _snake_case : Optional[Any] = 1 - self.feature_extractor.padding_value
        _snake_case : Optional[int] = np.pad(snake_case_ , ((0, 0), (0, difference)) , "constant" , constant_values=snake_case_ )
        _snake_case : Any = audio_values.tolist()
        for i in range(snake_case_ ):
            # Keep only the non-padded samples for example i.
            _snake_case : Tuple = np.asarray(audio_values[i] )[
                padding_mask[i][None, :] != self.feature_extractor.padding_value
            ]
            _snake_case : Tuple = sliced_audio.reshape(snake_case_ , -1 )
        return audio_values
| 87 | 1 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
)
class _UpperCAmelCase ( unittest.TestCase):
    """Model tester that builds tiny RobertaPreLayerNorm configs and inputs for
    the Flax test suite.

    NOTE(review): all constructor parameters were renamed to `snake_case_`
    (later ones shadow earlier ones) while the body reads the intended names
    (`parent`, `batch_size`, ...), and the prepare_* methods read unpack
    targets (`config`, `input_ids`, ...) that are never bound -- mechanical
    renaming damage; verify against the upstream Flax Roberta tests.
    """

    def __init__( self , snake_case_ , snake_case_=13 , snake_case_=7 , snake_case_=True , snake_case_=True , snake_case_=True , snake_case_=True , snake_case_=99 , snake_case_=32 , snake_case_=5 , snake_case_=4 , snake_case_=37 , snake_case_="gelu" , snake_case_=0.1 , snake_case_=0.1 , snake_case_=5_12 , snake_case_=16 , snake_case_=2 , snake_case_=0.02 , snake_case_=4 , ):
        _snake_case : int = parent
        _snake_case : Any = batch_size
        _snake_case : Any = seq_length
        _snake_case : Dict = is_training
        _snake_case : Tuple = use_attention_mask
        _snake_case : Union[str, Any] = use_token_type_ids
        _snake_case : Optional[int] = use_labels
        _snake_case : Tuple = vocab_size
        _snake_case : Any = hidden_size
        _snake_case : Optional[Any] = num_hidden_layers
        _snake_case : Union[str, Any] = num_attention_heads
        _snake_case : int = intermediate_size
        _snake_case : Optional[int] = hidden_act
        _snake_case : Tuple = hidden_dropout_prob
        _snake_case : Tuple = attention_probs_dropout_prob
        _snake_case : Union[str, Any] = max_position_embeddings
        _snake_case : int = type_vocab_size
        _snake_case : List[str] = type_sequence_label_size
        _snake_case : Optional[Any] = initializer_range
        _snake_case : int = num_choices

    def lowerCamelCase__ ( self ):
        # Builds (config, input_ids, token_type_ids, attention_mask).
        _snake_case : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        _snake_case : Union[str, Any] = None
        if self.use_attention_mask:
            _snake_case : Any = random_attention_mask([self.batch_size, self.seq_length] )
        _snake_case : List[str] = None
        if self.use_token_type_ids:
            _snake_case : int = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        _snake_case : Tuple = RobertaPreLayerNormConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=snake_case_ , initializer_range=self.initializer_range , )
        return config, input_ids, token_type_ids, attention_mask

    def lowerCamelCase__ ( self ):
        # Repackages the inputs as the dict used by the common Flax tests.
        _snake_case : str = self.prepare_config_and_inputs()
        _snake_case , _snake_case , _snake_case , _snake_case : Dict = config_and_inputs
        _snake_case : List[str] = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def lowerCamelCase__ ( self ):
        # Decoder variant: adds encoder hidden states and attention mask.
        _snake_case : Tuple = self.prepare_config_and_inputs()
        _snake_case , _snake_case , _snake_case , _snake_case : Tuple = config_and_inputs
        _snake_case : List[Any] = True
        _snake_case : int = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        _snake_case : Any = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
# Copied from tests.models.roberta.test_modelling_flax_roberta.FlaxRobertaPreLayerNormModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40
class _UpperCAmelCase ( _snake_case , unittest.TestCase):
    """Mixin-driven Flax RobertaPreLayerNorm model tests.

    NOTE(review): `_snake_case` in the bases presumably stands for
    FlaxModelTesterMixin (imported above), the two `__lowercase` attributes
    shadow each other, and `snake_case_` is an undefined free name -- verify
    against the upstream tests.
    """

    __lowercase : Union[str, Any] = True
    __lowercase : List[str] = (
        (
            FlaxRobertaPreLayerNormModel,
            FlaxRobertaPreLayerNormForCausalLM,
            FlaxRobertaPreLayerNormForMaskedLM,
            FlaxRobertaPreLayerNormForSequenceClassification,
            FlaxRobertaPreLayerNormForTokenClassification,
            FlaxRobertaPreLayerNormForMultipleChoice,
            FlaxRobertaPreLayerNormForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def lowerCamelCase__ ( self ):
        # setUp: builds the shared model tester.
        # NOTE(review): `FlaxRobertaPreLayerNormModelTester` is not defined
        # under that name in this module (the tester class above was renamed).
        _snake_case : Optional[int] = FlaxRobertaPreLayerNormModelTester(self )

    @slow
    def lowerCamelCase__ ( self ):
        # Smoke-test from_pretrained + a forward pass for every model class.
        for model_class_name in self.all_model_classes:
            _snake_case : Optional[Any] = model_class_name.from_pretrained("andreasmadsen/efficient_mlm_m0.40" , from_pt=snake_case_ )
            _snake_case : List[Any] = model(np.ones((1, 1) ) )
            self.assertIsNotNone(snake_case_ )
@require_flax
class _UpperCAmelCase ( unittest.TestCase):
    """Slow integration tests pinning RobertaPreLayerNorm outputs against
    hard-coded reference values.

    NOTE(review): `snake_case_` is an undefined free name, and both methods
    share the name `lowerCamelCase__` (the second shadows the first) -- verify
    against the upstream tests.
    """

    @slow
    def lowerCamelCase__ ( self ):
        # Masked-LM head: check logits shape and a pinned 3x3 slice.
        _snake_case : Optional[int] = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained("andreasmadsen/efficient_mlm_m0.40" , from_pt=snake_case_ )
        _snake_case : Optional[Any] = np.array([[0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2]] , dtype=jnp.intaa )
        _snake_case : Dict = model(snake_case_ )[0]
        _snake_case : Any = [1, 11, 5_02_65]
        self.assertEqual(list(output.shape ) , snake_case_ )
        # compare the actual values for a slice.
        _snake_case : Optional[int] = np.array(
            [[[40.4880, 18.0199, -5.2367], [-1.8877, -4.0885, 10.7085], [-2.2613, -5.6110, 7.2665]]] , dtype=np.floataa )
        self.assertTrue(np.allclose(output[:, :3, :3] , snake_case_ , atol=1E-4 ) )

    @slow
    def lowerCamelCase__ ( self ):
        # Base model: check a pinned 3x3 hidden-state slice.
        _snake_case : Tuple = FlaxRobertaPreLayerNormModel.from_pretrained("andreasmadsen/efficient_mlm_m0.40" , from_pt=snake_case_ )
        _snake_case : Tuple = np.array([[0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2]] , dtype=jnp.intaa )
        _snake_case : int = model(snake_case_ )[0]
        # compare the actual values for a slice.
        _snake_case : int = np.array(
            [[[0.0208, -0.0356, 0.0237], [-0.1569, -0.0411, -0.2626], [0.1879, 0.0125, -0.0089]]] , dtype=np.floataa )
        self.assertTrue(np.allclose(output[:, :3, :3] , snake_case_ , atol=1E-4 ) )
| 87 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
# Lazy import structure for the YOLOS model package: submodules are only
# imported when their attributes are first accessed.
# Fix: the original overwrote a single name (`_a`) with each piece of the
# structure (the dict, then the feature-extractor and modeling lists) and the
# final `_LazyModule(...)` call referenced the undefined name
# `_import_structure`, which raises NameError on import.  Restore the
# canonical transformers pattern: build one `_import_structure` dict and
# replace this module with a lazy proxy.
_import_structure = {"""configuration_yolos""": ["""YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP""", """YolosConfig""", """YolosOnnxConfig"""]}
try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Vision extras are only importable when the vision backend is installed.
    _import_structure["""feature_extraction_yolos"""] = ["""YolosFeatureExtractor"""]
    _import_structure["""image_processing_yolos"""] = ["""YolosImageProcessor"""]
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Modeling code requires torch.
    _import_structure["""modeling_yolos"""] = [
        """YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """YolosForObjectDetection""",
        """YolosModel""",
        """YolosPreTrainedModel""",
    ]
if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig
    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_yolos import YolosFeatureExtractor
        from .image_processing_yolos import YolosImageProcessor
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_yolos import (
            YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
            YolosForObjectDetection,
            YolosModel,
            YolosPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so the heavy submodules above are
    # only imported on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 87 | 1 |
"""simple docstring"""
import inspect
import unittest
from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class _UpperCAmelCase :
    """Builds tiny ConvNextV2 configs and inputs for the model tests.

    NOTE(review): constructor parameters were all renamed to `snake_case_`
    (later ones shadow earlier ones) while the body reads the intended names
    (`parent`, `batch_size`, ...); several methods read names (`config`,
    `pixel_values`, `labels`, `model`, `result`, `config_and_inputs`) that do
    not match the `_snake_case` bindings, and all methods share the name
    `lowerCamelCase__` -- mechanical renaming damage; verify against the
    upstream ConvNextV2 tests.
    """

    def __init__( self , snake_case_ , snake_case_=13 , snake_case_=32 , snake_case_=3 , snake_case_=4 , snake_case_=[10, 20, 30, 40] , snake_case_=[2, 2, 3, 2] , snake_case_=True , snake_case_=True , snake_case_=37 , snake_case_="gelu" , snake_case_=10 , snake_case_=0.02 , snake_case_=["stage2", "stage3", "stage4"] , snake_case_=[2, 3, 4] , snake_case_=None , ):
        _snake_case : Dict = parent
        _snake_case : Tuple = batch_size
        _snake_case : Optional[int] = image_size
        _snake_case : Dict = num_channels
        _snake_case : List[Any] = num_stages
        _snake_case : str = hidden_sizes
        _snake_case : Union[str, Any] = depths
        _snake_case : int = is_training
        _snake_case : Optional[Any] = use_labels
        _snake_case : Tuple = intermediate_size
        _snake_case : Tuple = hidden_act
        _snake_case : Tuple = num_labels
        _snake_case : Optional[Any] = initializer_range
        _snake_case : int = out_features
        _snake_case : Union[str, Any] = out_indices
        _snake_case : Union[str, Any] = scope

    def lowerCamelCase__ ( self ):
        # Random pixel_values (+ labels when use_labels) and a fresh config.
        _snake_case : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        _snake_case : Optional[int] = None
        if self.use_labels:
            _snake_case : List[Any] = ids_tensor([self.batch_size] , self.num_labels )
        _snake_case : Optional[Any] = self.get_config()
        return config, pixel_values, labels

    def lowerCamelCase__ ( self ):
        # Tiny ConvNextV2 config mirroring the tester attributes.
        return ConvNextVaConfig(
            num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=snake_case_ , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )

    def lowerCamelCase__ ( self , snake_case_ , snake_case_ , snake_case_ ):
        # Base model: last hidden state must be (B, C_last, H//32, W//32).
        _snake_case : Union[str, Any] = ConvNextVaModel(config=snake_case_ )
        model.to(snake_case_ )
        model.eval()
        _snake_case : Optional[Any] = model(snake_case_ )
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )

    def lowerCamelCase__ ( self , snake_case_ , snake_case_ , snake_case_ ):
        # Classification head: logits must be (B, num_labels).
        _snake_case : Optional[int] = ConvNextVaForImageClassification(snake_case_ )
        model.to(snake_case_ )
        model.eval()
        _snake_case : str = model(snake_case_ , labels=snake_case_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def lowerCamelCase__ ( self , snake_case_ , snake_case_ , snake_case_ ):
        # Backbone: checks feature-map shapes/channels for explicit
        # out_features and for the default (out_features=None) case.
        _snake_case : Tuple = ConvNextVaBackbone(config=snake_case_ )
        model.to(snake_case_ )
        model.eval()
        _snake_case : Optional[Any] = model(snake_case_ )
        # verify hidden states
        self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
        # verify channels
        self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
        self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
        # verify backbone works with out_features=None
        _snake_case : Any = None
        _snake_case : str = ConvNextVaBackbone(config=snake_case_ )
        model.to(snake_case_ )
        model.eval()
        _snake_case : str = model(snake_case_ )
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps ) , 1 )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
        # verify channels
        self.parent.assertEqual(len(model.channels ) , 1 )
        self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )

    def lowerCamelCase__ ( self ):
        # Common-test packaging: (config, {"pixel_values": ...}).
        _snake_case : Tuple = self.prepare_config_and_inputs()
        _snake_case , _snake_case , _snake_case : str = config_and_inputs
        _snake_case : Dict = {"pixel_values": pixel_values}
        return config, inputs_dict

    def lowerCamelCase__ ( self ):
        # Same as above but including labels (for training tests).
        _snake_case : List[str] = self.prepare_config_and_inputs()
        _snake_case , _snake_case , _snake_case : Optional[int] = config_and_inputs
        _snake_case : Union[str, Any] = {"pixel_values": pixel_values, "labels": labels}
        return config, inputs_dict
@require_torch
class _UpperCAmelCase ( _snake_case , _snake_case , unittest.TestCase):
    # NOTE(review): the two `_snake_case` bases presumably stand for
    # ModelTesterMixin and PipelineTesterMixin (imported above) after a
    # mechanical rename, and all `__lowercase` attributes shadow each other
    # (originals were presumably all_model_classes, pipeline_model_mapping and
    # several boolean capability flags) -- verify against upstream.

    # Model classes under test (backbone included) when torch is available.
    __lowercase : Dict = (
        (
            ConvNextVaModel,
            ConvNextVaForImageClassification,
            ConvNextVaBackbone,
        )
        if is_torch_available()
        else ()
    )
    # Pipeline-test mapping.
    __lowercase : str = (
        {"""feature-extraction""": ConvNextVaModel, """image-classification""": ConvNextVaForImageClassification}
        if is_torch_available()
        else {}
    )
    # Capability flags -- names were mangled; meanings unverified.
    __lowercase : Union[str, Any] = False
    __lowercase : str = False
    __lowercase : Tuple = False
    __lowercase : Union[str, Any] = False
    __lowercase : Optional[int] = False
def lowerCamelCase__ ( self ):
_snake_case : Any = ConvNextVaModelTester(self )
_snake_case : List[str] = ConfigTester(self , config_class=snake_case_ , has_text_modality=snake_case_ , hidden_size=37 )
def lowerCamelCase__ ( self ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowerCamelCase__ ( self ):
return
@unittest.skip(reason="ConvNextV2 does not use inputs_embeds" )
def lowerCamelCase__ ( self ):
pass
@unittest.skip(reason="ConvNextV2 does not support input and output embeddings" )
def lowerCamelCase__ ( self ):
pass
@unittest.skip(reason="ConvNextV2 does not use feedforward chunking" )
def lowerCamelCase__ ( self ):
pass
def lowerCamelCase__ ( self ):
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
_snake_case , _snake_case : List[Any] = self.model_tester.prepare_config_and_inputs_with_labels()
_snake_case : Tuple = True
if model_class.__name__ in [
*get_values(snake_case_ ),
*get_values(snake_case_ ),
]:
continue
_snake_case : str = model_class(snake_case_ )
model.to(snake_case_ )
model.train()
_snake_case : List[Any] = self._prepare_for_class(snake_case_ , snake_case_ , return_labels=snake_case_ )
_snake_case : List[str] = model(**snake_case_ ).loss
loss.backward()
# Same training smoke-test as above, but with gradient checkpointing enabled;
# additionally skips classes that do not support checkpointing.
def lowerCamelCase__ ( self ):
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
_snake_case , _snake_case : str = self.model_tester.prepare_config_and_inputs_with_labels()
_snake_case : List[str] = False
_snake_case : Optional[int] = True
# NOTE(review): get_values(...) arguments were mangled to an undefined name;
# upstream this skips loss-less base/backbone classes — confirm the mappings.
if (
model_class.__name__
in [*get_values(snake_case_ ), *get_values(snake_case_ )]
or not model_class.supports_gradient_checkpointing
):
continue
_snake_case : Optional[int] = model_class(snake_case_ )
model.to(snake_case_ )
model.gradient_checkpointing_enable()
model.train()
_snake_case : Optional[int] = self._prepare_for_class(snake_case_ , snake_case_ , return_labels=snake_case_ )
_snake_case : List[Any] = model(**snake_case_ ).loss
loss.backward()
def lowerCamelCase__ ( self ):
    """Every ConvNextV2 model's forward() must accept pixel_values as its first argument."""
    config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

    for model_class in self.all_model_classes:
        model = model_class(config)
        signature = inspect.signature(model.forward)
        # signature.parameters is an OrderedDict => so arg_names order is deterministic
        arg_names = [*signature.parameters.keys()]

        expected_arg_names = ["pixel_values"]
        # Bug fix: the original compared against an undefined mangled name instead
        # of the expected list built one line above.
        self.assertListEqual(arg_names[:1], expected_arg_names)
def lowerCamelCase__ ( self ):
    """Run the tester's create_and_check_model over a prepared config + inputs."""
    # Bug fix: the original unpacked into a throwaway local, then starred an
    # undefined mangled name; forward the prepared tuple to the tester instead.
    config_and_inputs = self.model_tester.prepare_config_and_inputs()
    self.model_tester.create_and_check_model(*config_and_inputs)
def lowerCamelCase__ ( self ):
    """Check hidden_states output: one map per stage (+ stem) at the expected size."""

    def check_hidden_states_output(inputs_dict, config, model_class):
        model = model_class(config)
        # NOTE(review): the device was mangled to an undefined name; torch_device
        # is the transformers testing convention — confirm it is imported.
        model.to(torch_device)
        model.eval()

        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

        hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

        expected_num_stages = self.model_tester.num_stages
        self.assertEqual(len(hidden_states), expected_num_stages + 1)

        # ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
        self.assertListEqual(
            list(hidden_states[0].shape[-2:]),
            [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
        )

    config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

    for model_class in self.all_model_classes:
        # Bug fix: the original dropped the flag into a local; it must be set on
        # inputs_dict (and, for the second check, on the config).
        inputs_dict["output_hidden_states"] = True
        check_hidden_states_output(inputs_dict, config, model_class)

        # check that output_hidden_states also work using config
        del inputs_dict["output_hidden_states"]
        config.output_hidden_states = True
        check_hidden_states_output(inputs_dict, config, model_class)
def lowerCamelCase__ ( self ):
    """Run the tester's image-classification check over a prepared config + inputs."""
    # Bug fix: the original starred an undefined mangled name instead of the
    # prepared tuple.
    config_and_inputs = self.model_tester.prepare_config_and_inputs()
    self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
@slow
def lowerCamelCase__ ( self ):
    """Loading the first published checkpoint should succeed (downloads weights; @slow)."""
    for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        # Bug fix: the original passed undefined mangled names instead of the loop
        # variable / the loaded model.
        model = ConvNextVaModel.from_pretrained(model_name)
        self.assertIsNotNone(model)
def prepare_img():
    """Load the COCO "two cats" fixture image used by the integration test below.

    Bug fix: the function name was mangled away while the integration test still
    calls prepare_img().
    """
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class _UpperCAmelCase ( unittest.TestCase):
    """Integration test: run the published ConvNextV2-Tiny checkpoint on a real image."""

    @cached_property
    def default_image_processor(self):
        # Bug fix: the property's name was mangled to the same identifier as the
        # test below (which therefore shadowed it), while the test reads
        # self.default_image_processor.
        return AutoImageProcessor.from_pretrained("facebook/convnextv2-tiny-1k-224") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        """Logits of the pretrained classifier must match the reference values."""
        model = ConvNextVaForImageClassification.from_pretrained("facebook/convnextv2-tiny-1k-224").to(torch_device)

        preprocessor = self.default_image_processor
        image = prepare_img()
        inputs = preprocessor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 10_00))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([0.9996, 0.1966, -0.4386]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1E-4))
| 87 |
"""simple docstring"""
import numpy as np
import torch
from torch.utils.data import Dataset, IterableDataset
from ..utils.generic import ModelOutput
class _UpperCAmelCase ( _snake_case):
def __init__( self , snake_case_ , snake_case_ , snake_case_ ):
_snake_case : Optional[int] = dataset
_snake_case : str = process
_snake_case : int = params
def __len__( self ):
return len(self.dataset )
def __getitem__( self , snake_case_ ):
_snake_case : Union[str, Any] = self.dataset[i]
_snake_case : Optional[Any] = self.process(snake_case_ , **self.params )
return processed
class _UpperCAmelCase ( _snake_case):
def __init__( self , snake_case_ , snake_case_ , snake_case_ , snake_case_=None ):
_snake_case : Union[str, Any] = loader
_snake_case : Tuple = infer
_snake_case : List[Any] = params
if loader_batch_size == 1:
# Let's spare some time by deactivating altogether
_snake_case : int = None
_snake_case : int = loader_batch_size
# Internal bookkeeping
_snake_case : Any = None
_snake_case : Dict = None
def __len__( self ):
return len(self.loader )
def __iter__( self ):
_snake_case : int = iter(self.loader )
return self
def lowerCamelCase__ ( self ):
if isinstance(self._loader_batch_data , torch.Tensor ):
# Batch data is simple tensor, just fetch the slice
_snake_case : List[Any] = self._loader_batch_data[self._loader_batch_index]
else:
# Batch data is assumed to be BaseModelOutput (or dict)
_snake_case : int = {}
for k, element in self._loader_batch_data.items():
if isinstance(snake_case_ , snake_case_ ):
# Convert ModelOutput to tuple first
_snake_case : Tuple = element.to_tuple()
if isinstance(element[0] , torch.Tensor ):
_snake_case : Union[str, Any] = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element )
elif isinstance(element[0] , np.ndarray ):
_snake_case : int = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element )
continue
if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(snake_case_ , snake_case_ ):
# Those are stored as lists of tensors so need specific unbatching.
if isinstance(element[0] , torch.Tensor ):
_snake_case : Tuple = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element )
elif isinstance(element[0] , np.ndarray ):
_snake_case : Tuple = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element )
continue
if element is None:
# This can happen for optional data that get passed around
_snake_case : Tuple = None
elif isinstance(element[self._loader_batch_index] , torch.Tensor ):
# Take correct batch data, but make it looked like batch_size=1
# For compatibility with other methods within transformers
_snake_case : List[Any] = element[self._loader_batch_index].unsqueeze(0 )
elif isinstance(element[self._loader_batch_index] , np.ndarray ):
# Take correct batch data, but make it looked like batch_size=1
# For compatibility with other methods within transformers
_snake_case : Union[str, Any] = np.expand_dims(element[self._loader_batch_index] , 0 )
else:
# This is typically a list, so no need to `unsqueeze`.
_snake_case : List[Any] = element[self._loader_batch_index]
# Recreate the element by reusing the original class to make it look
# batch_size=1
_snake_case : int = self._loader_batch_data.__class__(snake_case_ )
self._loader_batch_index += 1
return result
def lowerCamelCase__ ( self ):
if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
# We are currently unrolling a batch so we just need to return
# the current item within a batch
return self.loader_batch_item()
# We're out of items within a batch
_snake_case : Tuple = next(self.iterator )
_snake_case : Any = self.infer(snake_case_ , **self.params )
# We now have a batch of "inferred things".
if self.loader_batch_size is not None:
# Try to infer the size of the batch
if isinstance(snake_case_ , torch.Tensor ):
_snake_case : Union[str, Any] = processed
else:
_snake_case : Optional[int] = list(processed.keys() )[0]
_snake_case : List[str] = processed[key]
if isinstance(snake_case_ , snake_case_ ):
_snake_case : Dict = len(snake_case_ )
else:
_snake_case : Optional[int] = first_tensor.shape[0]
if 0 < observed_batch_size < self.loader_batch_size:
# could be last batch so we can't unroll as many
# elements.
_snake_case : Union[str, Any] = observed_batch_size
# Setting internal index to unwrap the batch
_snake_case : str = processed
_snake_case : List[Any] = 0
return self.loader_batch_item()
else:
# We're not unrolling batches
return processed
class _UpperCAmelCase ( _snake_case):
def __init__( self , snake_case_ , snake_case_ , snake_case_ , snake_case_=None ):
super().__init__(snake_case_ , snake_case_ , snake_case_ )
def __iter__( self ):
_snake_case : Tuple = iter(self.loader )
_snake_case : List[Any] = None
return self
def lowerCamelCase__ ( self ):
if self.subiterator is None:
_snake_case : Optional[Any] = self.infer(next(self.iterator ) , **self.params )
try:
# Try to return next item
_snake_case : Union[str, Any] = next(self.subiterator )
except StopIteration:
# When a preprocess iterator ends, we can start lookig at the next item
# ChunkIterator will keep feeding until ALL elements of iterator
# all have created their subiterator and have been iterating against.
#
# Another way to look at it, is we're basically flattening lists of lists
# into a single list, but with generators
_snake_case : str = self.infer(next(self.iterator ) , **self.params )
_snake_case : Tuple = next(self.subiterator )
return processed
class _UpperCAmelCase ( _snake_case):
def __iter__( self ):
_snake_case : Optional[Any] = iter(self.loader )
return self
def lowerCamelCase__ ( self ):
# Extremely similar to PipelineIterator in its unpacking mechanism
# BUT, we have an extra required item which is the presence of `is_last`
# That is because everything is flattened by `PipelineChunkIterator` we
# need to keep track of how to regroup here in the original `process`
# boundaries so that `process` and `postprocess` see the same data.
# This iterator accumulates items (possibly while unbatching) until it
# its a `is_last` and then just passes it on to the caller.
_snake_case : Optional[Any] = False
_snake_case : Tuple = []
if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
while self._loader_batch_index < self.loader_batch_size:
_snake_case : Union[str, Any] = self.loader_batch_item()
_snake_case : str = item.pop("is_last" )
accumulator.append(snake_case_ )
if is_last:
return accumulator
while not is_last:
_snake_case : List[str] = self.infer(next(self.iterator ) , **self.params )
if self.loader_batch_size is not None:
if isinstance(snake_case_ , torch.Tensor ):
_snake_case : Union[str, Any] = processed
else:
_snake_case : Tuple = list(processed.keys() )[0]
_snake_case : Tuple = processed[key]
if isinstance(snake_case_ , snake_case_ ):
_snake_case : Any = len(snake_case_ )
else:
_snake_case : List[Any] = first_tensor.shape[0]
if 0 < observed_batch_size < self.loader_batch_size:
# could be last batch so we can't unroll as many
# elements.
_snake_case : Dict = observed_batch_size
_snake_case : List[Any] = processed
_snake_case : List[str] = 0
while self._loader_batch_index < self.loader_batch_size:
_snake_case : Union[str, Any] = self.loader_batch_item()
_snake_case : int = item.pop("is_last" )
accumulator.append(snake_case_ )
if is_last:
return accumulator
else:
_snake_case : Dict = processed
_snake_case : Dict = item.pop("is_last" )
accumulator.append(snake_case_ )
return accumulator
class _UpperCAmelCase ( _snake_case):
def __init__( self , snake_case_ , snake_case_ ):
_snake_case : str = dataset
_snake_case : Any = key
def __len__( self ):
return len(self.dataset )
def __getitem__( self , snake_case_ ):
return self.dataset[i][self.key]
class _UpperCAmelCase ( _snake_case):
def __init__( self , snake_case_ , snake_case_ , snake_case_ ):
_snake_case : int = dataset
_snake_case : Any = keya
_snake_case : int = keya
def __len__( self ):
return len(self.dataset )
def __getitem__( self , snake_case_ ):
return {"text": self.dataset[i][self.keya], "text_pair": self.dataset[i][self.keya]}
| 87 | 1 |
"""simple docstring"""
import math
from collections.abc import Iterator
from itertools import takewhile
def is_prime(number: int) -> bool:
    """Return True if *number* is prime, using 6k±1 trial division.

    Bug fix: restored the name — prime_generator() below calls is_prime, which
    the mangled original (renamed to a throwaway identifier) never defined.
    """
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def prime_generator():
    """Yield the primes 2, 3, 5, 7, ... indefinitely.

    Bug fix: restored the name — solution() below iterates prime_generator(),
    which the mangled original never defined.  Relies on the module-level
    is_prime() primality test.
    """
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1
def solution(a: int = 2_000_000) -> int:
    """Return the sum of all primes strictly below *a* (Project Euler #10).

    Bug fixes: restored the function name used by the __main__ guard, and fixed
    the takewhile predicate, which referenced two undefined names instead of
    comparing each generated prime against the limit.
    """
    return sum(takewhile(lambda x: x < a, prime_generator()))
# Entry point: print the Project Euler #10 answer.
# NOTE(review): `solution` is undefined in this mangled copy (the functions above
# lost their names); confirm the definitions are restored before running.
if __name__ == "__main__":
print(f'{solution() = }')
| 87 |
"""simple docstring"""
def a__ ( a : int ):
    """Return the position of the highest set bit of *a* (0 for 0); equivalent to
    int.bit_length for non-negative integers.

    Bug fixes vs. the mangled original: the isinstance check compared the value
    against itself instead of int, and the loop read names (number, position)
    that were never bound.

    >>> a__(0)
    0
    >>> a__(1)
    1
    >>> a__(8)
    4

    Raises:
        TypeError: if *a* is not an int.
    """
    if not isinstance(a, int):
        raise TypeError("Input value must be an 'int' type")
    position = 0
    number = a  # intended for non-negative inputs; negatives would loop forever
    while number:
        position += 1
        number >>= 1
    return position
# Run the doctests embedded in this module when executed directly.
if __name__ == "__main__":
import doctest
doctest.testmod()
| 87 | 1 |
"""simple docstring"""
from sklearn.metrics import mean_squared_error
import datasets
_a : List[Any] = """\
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
"""
_a : Dict = """\
Mean Squared Error(MSE) is the average of the square of difference between the predicted
and actual values.
"""
_a : Union[str, Any] = """
Args:
predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)
Estimated target values.
references: array-like of shape (n_samples,) or (n_samples, n_outputs)
Ground truth (correct) target values.
sample_weight: array-like of shape (n_samples,), default=None
Sample weights.
multioutput: {\"raw_values\", \"uniform_average\"} or array-like of shape (n_outputs,), default=\"uniform_average\"
Defines aggregating of multiple output values. Array-like value defines weights used to average errors.
\"raw_values\" : Returns a full set of errors in case of multioutput input.
\"uniform_average\" : Errors of all outputs are averaged with uniform weight.
squared : bool, default=True
If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.
Returns:
mse : mean squared error.
Examples:
>>> mse_metric = datasets.load_metric(\"mse\")
>>> predictions = [2.5, 0.0, 2, 8]
>>> references = [3, -0.5, 2, 7]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'mse': 0.375}
>>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)
>>> print(rmse_result)
{'mse': 0.6123724356957945}
If you're using multi-dimensional lists, then set the config as follows :
>>> mse_metric = datasets.load_metric(\"mse\", \"multilist\")
>>> predictions = [[0.5, 1], [-1, 1], [7, -6]]
>>> references = [[0, 2], [-1, 2], [8, -5]]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'mse': 0.7083333333333334}
>>> results = mse_metric.compute(predictions=predictions, references=references, multioutput='raw_values')
>>> print(results) # doctest: +NORMALIZE_WHITESPACE
{'mse': array([0.41666667, 1. ])}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class _UpperCAmelCase ( datasets.Metric):
    """`datasets` metric wrapping sklearn's mean_squared_error.

    Bug fix: all three methods were mangled to one identifier, so the _info /
    _get_feature_types / _compute hooks datasets.Metric dispatches to no longer
    existed; their names are restored here.
    """

    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(self._get_feature_types()),
            reference_urls=[
                "https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html"
            ],
        )

    def _get_feature_types(self):
        # The "multilist" config accepts multi-dimensional predictions/references.
        if self.config_name == "multilist":
            return {
                "predictions": datasets.Sequence(datasets.Value("float")),
                "references": datasets.Sequence(datasets.Value("float")),
            }
        else:
            return {
                "predictions": datasets.Value("float"),
                "references": datasets.Value("float"),
            }

    def _compute(self, predictions, references, sample_weight=None, multioutput="uniform_average", squared=True):
        """Return {"mse": ...}; squared=False yields RMSE instead of MSE."""
        # sklearn's signature is (y_true, y_pred): references come first.
        mse = mean_squared_error(
            references, predictions, sample_weight=sample_weight, multioutput=multioutput, squared=squared
        )
        return {"mse": mse}
| 87 |
"""simple docstring"""
from __future__ import annotations
import requests
_a : List[str] = set(
"""approved_at_utc approved_by author_flair_background_color
author_flair_css_class author_flair_richtext author_flair_template_id author_fullname
author_premium can_mod_post category clicked content_categories created_utc downs
edited gilded gildings hidden hide_score is_created_from_ads_ui is_meta
is_original_content is_reddit_media_domain is_video link_flair_css_class
link_flair_richtext link_flair_text link_flair_text_color media_embed mod_reason_title
name permalink pwls quarantine saved score secure_media secure_media_embed selftext
subreddit subreddit_name_prefixed subreddit_type thumbnail title top_awarded_type
total_awards_received ups upvote_ratio url user_reports""".split()
)
def a__ ( a : str , a : int = 1 , a : str = "new" , a : list | None = None ):
"""simple docstring"""
_snake_case : Any = wanted_data or []
if invalid_search_terms := ", ".join(sorted(set(a ) - valid_terms ) ):
_snake_case : Optional[int] = f'Invalid search term: {invalid_search_terms}'
raise ValueError(a )
_snake_case : int = requests.get(
f'https://reddit.com/r/{subreddit}/{age}.json?limit={limit}' , headers={"User-agent": "A random string"} , )
if response.status_code == 429:
raise requests.HTTPError
_snake_case : Optional[Any] = response.json()
if not wanted_data:
return {id_: data["data"]["children"][id_] for id_ in range(a )}
_snake_case : Tuple = {}
for id_ in range(a ):
_snake_case : List[str] = {
item: data["data"]["children"][id_]["data"][item] for item in wanted_data
}
return data_dict
# Entry point: fetch one post from r/learnpython and print selected fields.
if __name__ == "__main__":
# If you get Error 429, that means you are rate limited.Try after some time
print(get_subreddit_data("""learnpython""", wanted_data=["""title""", """url""", """selftext"""]))
| 87 | 1 |
"""simple docstring"""
import argparse
import collections
import json
from pathlib import Path
import requests
import torch
import yaml
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTImageProcessor,
MobileViTVaConfig,
MobileViTVaForImageClassification,
MobileViTVaForSemanticSegmentation,
)
from transformers.utils import logging
logging.set_verbosity_info()
_a : List[Any] = logging.get_logger(__name__)
def load_orig_config_file(config_path):
    """Parse an original MobileViTv2 YAML config into a flat argparse.Namespace.

    Nested keys are flattened with '.' separators (e.g. model.classification.name).
    Bug fixes vs. the mangled original: the inner helper declared three
    identically named parameters (a SyntaxError), and this function lost the
    load_orig_config_file name that get_mobilevitva_config() calls.
    """
    print("Loading config file...")

    def flatten_yaml_as_dict(d, parent_key="", sep="."):
        # Recursively flatten nested mappings into dotted keys.
        items = []
        for k, v in d.items():
            new_key = parent_key + sep + k if parent_key else k
            if isinstance(v, collections.abc.MutableMapping):
                items.extend(flatten_yaml_as_dict(v, new_key, sep=sep).items())
            else:
                items.append((new_key, v))
        return dict(items)

    config = argparse.Namespace()
    with open(config_path, "r") as yaml_file:
        try:
            cfg = yaml.load(yaml_file, Loader=yaml.FullLoader)

            flat_cfg = flatten_yaml_as_dict(cfg)
            for k, v in flat_cfg.items():
                setattr(config, k, v)
        except yaml.YAMLError as exc:
            logger.error("Error while loading config file: {}. Error message: {}".format(config_path, str(exc)))
    return config
def get_mobilevitva_config(task_name, orig_cfg_file):
    """Build a MobileViTVaConfig for a conversion task.

    task_name selects dataset-specific settings (label count, image size, label
    maps, segmentation vs. classification); orig_cfg_file is the original YAML
    config, consulted for width multiplier / activation / ASPP settings.
    Bug fixes vs. the mangled original: the two parameters shared one name (a
    SyntaxError) and every computed value was dropped into a throwaway local
    instead of being stored on the config.
    """
    config = MobileViTVaConfig()

    is_segmentation_model = False

    # dataset
    if task_name.startswith("imagenet1k_"):
        config.num_labels = 1_000
        if int(task_name.strip().split("_")[-1]) == 384:
            config.image_size = 384
        else:
            config.image_size = 256
        filename = "imagenet-1k-id2label.json"
    elif task_name.startswith("imagenet21k_to_1k_"):
        config.num_labels = 21_000
        if int(task_name.strip().split("_")[-1]) == 384:
            config.image_size = 384
        else:
            config.image_size = 256
        filename = "imagenet-22k-id2label.json"
    elif task_name.startswith("ade20k_"):
        config.num_labels = 151
        config.image_size = 512
        filename = "ade20k-id2label.json"
        is_segmentation_model = True
    elif task_name.startswith("voc_"):
        config.num_labels = 21
        config.image_size = 512
        filename = "pascal-voc-id2label.json"
        is_segmentation_model = True

    # orig_config
    orig_config = load_orig_config_file(orig_cfg_file)
    assert getattr(orig_config, "model.classification.name", -1) == "mobilevit_v2", "Invalid model"
    config.width_multiplier = getattr(orig_config, "model.classification.mitv2.width_multiplier", 1.0)
    assert (
        getattr(orig_config, "model.classification.mitv2.attn_norm_layer", -1) == "layer_norm_2d"
    ), "Norm layers other than layer_norm_2d is not supported"
    config.hidden_act = getattr(orig_config, "model.classification.activation.name", "swish")
    # config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256)

    if is_segmentation_model:
        config.output_stride = getattr(orig_config, "model.segmentation.output_stride", 16)
        if "_deeplabv3" in task_name:
            config.atrous_rates = getattr(orig_config, "model.segmentation.deeplabv3.aspp_rates", [12, 24, 36])
            config.aspp_out_channels = getattr(orig_config, "model.segmentation.deeplabv3.aspp_out_channels", 512)
            config.aspp_dropout_prob = getattr(orig_config, "model.segmentation.deeplabv3.aspp_dropout", 0.1)

    # id2label
    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def rename_key(dct, old, new):
    """Move dct[old] to dct[new] in place.

    Bug fixes: the original declared three identically named parameters (a
    SyntaxError) and dropped the popped value into a local instead of
    re-inserting it under the new key.
    """
    val = dct.pop(old)
    dct[new] = val
def create_rename_keys(state_dict, base_model=False):
    """Return (old_key, new_key) pairs mapping original MobileViTv2 checkpoint
    names onto the transformers MobileViTV2 naming scheme.

    When base_model is False, non-head keys are prefixed with "mobilevitv2.".
    Bug fixes vs. the mangled original: duplicate parameter names (SyntaxError)
    and every intermediate k_new dropped into a throwaway local, so the appended
    pairs referenced an undefined name.
    """
    if base_model:
        model_prefix = ""
    else:
        model_prefix = "mobilevitv2."

    rename_keys = []
    for k in state_dict.keys():
        # Strip the leading "encoder." the original checkpoints carry.
        if k[:8] == "encoder.":
            k_new = k[8:]
        else:
            k_new = k

        if ".block." in k:
            k_new = k_new.replace(".block.", ".")
        if ".conv." in k:
            k_new = k_new.replace(".conv.", ".convolution.")
        if ".norm." in k:
            k_new = k_new.replace(".norm.", ".normalization.")

        if "conv_1." in k:
            k_new = k_new.replace("conv_1.", f'{model_prefix}conv_stem.')
        for i in [1, 2]:
            if f'layer_{i}.' in k:
                k_new = k_new.replace(f'layer_{i}.', f'{model_prefix}encoder.layer.{i-1}.layer.')
        if ".exp_1x1." in k:
            k_new = k_new.replace(".exp_1x1.", ".expand_1x1.")
        if ".red_1x1." in k:
            k_new = k_new.replace(".red_1x1.", ".reduce_1x1.")

        for i in [3, 4, 5]:
            if f'layer_{i}.0.' in k:
                k_new = k_new.replace(f'layer_{i}.0.', f'{model_prefix}encoder.layer.{i-1}.downsampling_layer.')
            if f'layer_{i}.1.local_rep.0.' in k:
                k_new = k_new.replace(f'layer_{i}.1.local_rep.0.', f'{model_prefix}encoder.layer.{i-1}.conv_kxk.')
            if f'layer_{i}.1.local_rep.1.' in k:
                k_new = k_new.replace(f'layer_{i}.1.local_rep.1.', f'{model_prefix}encoder.layer.{i-1}.conv_1x1.')

        for i in [3, 4, 5]:
            # Number of transformer blocks per stage in MobileViTv2.
            if i == 3:
                j_in = [0, 1]
            elif i == 4:
                j_in = [0, 1, 2, 3]
            elif i == 5:
                j_in = [0, 1, 2]

            for j in j_in:
                if f'layer_{i}.1.global_rep.{j}.' in k:
                    k_new = k_new.replace(
                        f'layer_{i}.1.global_rep.{j}.', f'{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}.')
                if f'layer_{i}.1.global_rep.{j+1}.' in k:
                    k_new = k_new.replace(
                        f'layer_{i}.1.global_rep.{j+1}.', f'{model_prefix}encoder.layer.{i-1}.layernorm.')
            if f'layer_{i}.1.conv_proj.' in k:
                k_new = k_new.replace(f'layer_{i}.1.conv_proj.', f'{model_prefix}encoder.layer.{i-1}.conv_projection.')

        if "pre_norm_attn.0." in k:
            k_new = k_new.replace("pre_norm_attn.0.", "layernorm_before.")
        if "pre_norm_attn.1." in k:
            k_new = k_new.replace("pre_norm_attn.1.", "attention.")
        if "pre_norm_ffn.0." in k:
            k_new = k_new.replace("pre_norm_ffn.0.", "layernorm_after.")
        if "pre_norm_ffn.1." in k:
            k_new = k_new.replace("pre_norm_ffn.1.", "ffn.conv1.")
        if "pre_norm_ffn.3." in k:
            k_new = k_new.replace("pre_norm_ffn.3.", "ffn.conv2.")

        if "classifier.1." in k:
            k_new = k_new.replace("classifier.1.", "classifier.")

        if "seg_head." in k:
            k_new = k_new.replace("seg_head.", "segmentation_head.")
        if ".aspp_layer." in k:
            k_new = k_new.replace(".aspp_layer.", ".")
        if ".aspp_pool." in k:
            k_new = k_new.replace(".aspp_pool.", ".")

        rename_keys.append((k, k_new))
    return rename_keys
def remove_unused_keys(state_dict):
    """Drop auxiliary segmentation-head weights (in place); transformers'
    MobileViTV2 has no aux head.

    Bug fixes: the original appended/popped an undefined mangled name instead of
    the loop key.
    """
    keys_to_ignore = []
    for k in state_dict.keys():
        if k.startswith("seg_head.aux_head."):
            keys_to_ignore.append(k)
    for k in keys_to_ignore:
        state_dict.pop(k, None)
def prepare_img():
    """Download the standard COCO cats image used to sanity-check conversions.

    Bug fixes: restored the prepare_img name the conversion function calls, and
    the mangled stream argument (requests needs stream=True for .raw).
    """
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    # url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_mobilevitva_checkpoint(task_name, checkpoint_path, orig_config_path, pytorch_dump_folder_path):
    """Convert an original MobileViTv2 checkpoint to the transformers format and
    save the model and image processor under pytorch_dump_folder_path.

    Bug fixes vs. the mangled original: the four parameters shared one name (a
    SyntaxError) and every intermediate (config, model, state_dict, outputs, ...)
    was dropped into one throwaway local.
    """
    config = get_mobilevitva_config(task_name, orig_config_path)

    # load original state_dict
    checkpoint = torch.load(checkpoint_path, map_location="cpu")

    # load huggingface model
    if task_name.startswith("ade20k_") or task_name.startswith("voc_"):
        model = MobileViTVaForSemanticSegmentation(config).eval()
        base_model = False
    else:
        model = MobileViTVaForImageClassification(config).eval()
        base_model = False

    # remove and rename some keys of load the original model
    state_dict = checkpoint
    remove_unused_keys(state_dict)
    rename_keys = create_rename_keys(state_dict, base_model=base_model)
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict, rename_key_src, rename_key_dest)

    # load modified state_dict
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by MobileViTImageProcessor
    image_processor = MobileViTImageProcessor(crop_size=config.image_size, size=config.image_size + 32)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)

    # verify classification model
    if task_name.startswith("imagenet"):
        logits = outputs.logits
        predicted_class_idx = logits.argmax(-1).item()
        print("Predicted class:", model.config.id2label[predicted_class_idx])
        if task_name.startswith("imagenet1k_256") and config.width_multiplier == 1.0:
            # expected_logits for base variant
            expected_logits = torch.tensor([-1.6_3_3_6e0_0, -7.3_2_0_4e-0_2, -5.1_8_8_3e-0_1])
            assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f'Saving model {task_name} to {pytorch_dump_folder_path}')
    model.save_pretrained(pytorch_dump_folder_path)
    print(f'Saving image processor to {pytorch_dump_folder_path}')
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    # Bug fix: the parser and the parsed args were assigned to a throwaway
    # mangled name while the following lines read `parser` / `args`.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--task",
        default="imagenet1k_256",
        type=str,
        help=(
            """Name of the task for which the MobileViTV2 model you'd like to convert is trained on . """
            """
            Classification (ImageNet-1k)
            - MobileViTV2 (256x256) : imagenet1k_256
            - MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384
            - MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :
            imagenet21k_to_1k_256
            - MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on
            ImageNet-1k 384x384) : imagenet21k_to_1k_384
            Segmentation
            - ADE20K Dataset : ade20k_deeplabv3
            - Pascal VOC 2012 Dataset: voc_deeplabv3
            """
        ),
        choices=[
            "imagenet1k_256",
            "imagenet1k_384",
            "imagenet21k_to_1k_256",
            "imagenet21k_to_1k_384",
            "ade20k_deeplabv3",
            "voc_deeplabv3",
        ],
    )
    parser.add_argument(
        "--orig_checkpoint_path", required=True, type=str, help="Path to the original state dict (.pt file)."
    )
    parser.add_argument("--orig_config_path", required=True, type=str, help="Path to the original config file.")
    parser.add_argument(
        "--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory."
    )

    args = parser.parse_args()
    convert_mobilevitva_checkpoint(
        args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path
    )
| 87 |
"""simple docstring"""
from __future__ import annotations
from numpy import array, cos, cross, floataa, radians, sin
from numpy.typing import NDArray
def polar_force(magnitude: float, angle: float, radian_mode: bool = False) -> list[float]:
    """Resolve a force given in polar form into its [Fx, Fy] components.

    *angle* is in degrees unless radian_mode is True.  Bug fixes: the original
    declared three parameters with one duplicated name (a SyntaxError), and the
    module self-tests below call polar_force, a name the mangled copy lost.
    """
    if radian_mode:
        return [magnitude * cos(angle), magnitude * sin(angle)]
    return [magnitude * cos(radians(angle)), magnitude * sin(radians(angle))]
def in_static_equilibrium(forces: NDArray[floataa], location: NDArray[floataa], eps: float = 10**-1) -> bool:
    """Return True if the net moment of *forces* applied at *location* (about the
    origin) is within *eps* of zero.

    Bug fix: the original declared three identically named parameters (a
    SyntaxError); the module self-tests below call in_static_equilibrium.
    """
    # 2D cross product location x force yields each force's moment about the origin.
    moments: NDArray[floataa] = cross(location, forces)
    sum_moments: float = sum(moments)
    return abs(sum_moments) < eps
if __name__ == "__main__":
    # Bug fix: every forces/location array was assigned to one throwaway mangled
    # name while the asserts read `forces` and `location`.
    # Test to check if it works
    forces = array(
        [
            polar_force(718.4, 180 - 30),
            polar_force(879.54, 45),
            polar_force(100, -90),
        ]
    )
    location: NDArray[floataa] = array([[0, 0], [0, 0], [0, 0]])
    assert in_static_equilibrium(forces, location)

    # Problem 1 in image_data/2D_problems.jpg
    forces = array(
        [
            polar_force(30 * 9.81, 15),
            polar_force(215, 180 - 45),
            polar_force(264, 90 - 30),
        ]
    )
    location = array([[0, 0], [0, 0], [0, 0]])
    assert in_static_equilibrium(forces, location)

    # Problem in image_data/2D_problems_1.jpg
    forces = array([[0, -2_000], [0, -1_200], [0, 15_600], [0, -12_400]])
    location = array([[0, 0], [6, 0], [10, 0], [12, 0]])
    assert in_static_equilibrium(forces, location)

    import doctest

    doctest.testmod()
| 87 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-import scaffold for the Timesformer subpackage.
# Bug fixes vs. the mangled original: the import structure (and its torch-only
# extension) was assigned to one throwaway name, and the _LazyModule instance was
# never installed into sys.modules.
_import_structure = {
    "configuration_timesformer": ["TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "TimesformerConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Model classes are only importable when torch is installed.
    _import_structure["modeling_timesformer"] = [
        "TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TimesformerModel",
        "TimesformerForVideoClassification",
        "TimesformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_timesformer import (
            TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TimesformerForVideoClassification,
            TimesformerModel,
            TimesformerPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 87 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Bug fix: the logger and the archive map were both assigned to one throwaway
# name, clobbering each other; restore the conventional module-level names.
logger = logging.get_logger(__name__)

# Checkpoint shortcut -> hosted config URL.
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {"openai-gpt": "https://huggingface.co/openai-gpt/resolve/main/config.json"}
class _UpperCAmelCase ( PretrainedConfig):
    """
    Configuration for an OpenAI-GPT style model.

    Fixes applied: the base class was the undefined name `_snake_case`
    (``PretrainedConfig`` is imported at the top of this module); the
    ``__init__`` signature repeated ``snake_case_`` for every parameter
    (a SyntaxError); and the attribute assignments were bound to local
    ``_snake_case`` names instead of ``self``. Parameter names are taken
    from the names the original body read on the right-hand side.
    """

    # NOTE(review): both class attributes below bind the same (mangled) name
    # `__lowercase`, so the second shadows the first — presumably these were
    # `model_type` and `attribute_map` originally; confirm before use.
    __lowercase : Optional[Any] = """openai-gpt"""
    __lowercase : Dict = {
        """max_position_embeddings""": """n_positions""",
        """hidden_size""": """n_embd""",
        """num_attention_heads""": """n_head""",
        """num_hidden_layers""": """n_layer""",
    }

    def __init__( self , vocab_size=4_04_78 , n_positions=5_12 , n_embd=7_68 , n_layer=12 , n_head=12 , afn="gelu" , resid_pdrop=0.1 , embd_pdrop=0.1 , attn_pdrop=0.1 , layer_norm_epsilon=1E-5 , initializer_range=0.02 , summary_type="cls_index" , summary_use_proj=True , summary_activation=None , summary_proj_to_labels=True , summary_first_dropout=0.1 , **kwargs , ):
        """Store the hyper-parameters and forward remaining kwargs to the base config."""
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.afn = afn
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_first_dropout = summary_first_dropout
        self.summary_proj_to_labels = summary_proj_to_labels
        super().__init__(**kwargs )
| 87 | 1 |
"""simple docstring"""
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class _UpperCAmelCase ( _snake_case):
    """
    Processor combining a LayoutLMv2 image processor with a LayoutXLM tokenizer.

    NOTE(review): this class is heavily degraded. The base class `_snake_case`
    is undefined (presumably ProcessorMixin, imported above — confirm), and the
    `__init__`/`__call__` signatures repeat the parameter name `snake_case_`,
    which is a SyntaxError; their bodies still read the original parameter
    names (image_processor, tokenizer, text, boxes, ...), which are therefore
    unbound. Left byte-identical; only comments added.
    """

    # Attributes consumed by the processor machinery; both class attributes
    # named `__lowercase` below shadow each other (names were lost in the
    # rename) — presumably attributes, image_processor_class, tokenizer_class.
    __lowercase : Union[str, Any] = ["""image_processor""", """tokenizer"""]
    __lowercase : Dict = """LayoutLMv2ImageProcessor"""
    __lowercase : List[Any] = ("""LayoutXLMTokenizer""", """LayoutXLMTokenizerFast""")
    # NOTE(review): duplicate `snake_case_` parameters -> SyntaxError; body
    # reads `kwargs`, `image_processor`, `tokenizer`, which are unbound here.
    def __init__( self , snake_case_=None , snake_case_=None , **snake_case_ ):
        if "feature_extractor" in kwargs:
            # Backward-compat shim: `feature_extractor` was renamed to
            # `image_processor`.
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead." , snake_case_ , )
            _snake_case : List[str] = kwargs.pop("feature_extractor" )
        _snake_case : Tuple = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`." )
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`." )
        super().__init__(snake_case_ , snake_case_ )
    # NOTE(review): same degradation as __init__ — the many `snake_case_`
    # parameters were originally text/text_pair/boxes/word_labels and the
    # usual tokenizer flags (padding, truncation, ...); confirm upstream.
    def __call__( self , snake_case_ , snake_case_ = None , snake_case_ = None , snake_case_ = None , snake_case_ = None , snake_case_ = True , snake_case_ = False , snake_case_ = None , snake_case_ = None , snake_case_ = 0 , snake_case_ = None , snake_case_ = None , snake_case_ = None , snake_case_ = False , snake_case_ = False , snake_case_ = False , snake_case_ = False , snake_case_ = True , snake_case_ = None , **snake_case_ , ):
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes "
                "if you initialized the image processor with apply_ocr set to True." )
        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True." )
        if return_overflowing_tokens is True and return_offsets_mapping is False:
            raise ValueError("You cannot return overflowing tokens without returning the offsets mapping." )
        # first, apply the image processor
        _snake_case : List[Any] = self.image_processor(images=snake_case_ , return_tensors=snake_case_ )
        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(snake_case_ , snake_case_ ):
                _snake_case : Dict = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            _snake_case : Optional[Any] = features["words"]
        _snake_case : List[Any] = self.tokenizer(
            text=text if text is not None else features["words"] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features["boxes"] , word_labels=snake_case_ , add_special_tokens=snake_case_ , padding=snake_case_ , truncation=snake_case_ , max_length=snake_case_ , stride=snake_case_ , pad_to_multiple_of=snake_case_ , return_token_type_ids=snake_case_ , return_attention_mask=snake_case_ , return_overflowing_tokens=snake_case_ , return_special_tokens_mask=snake_case_ , return_offsets_mapping=snake_case_ , return_length=snake_case_ , verbose=snake_case_ , return_tensors=snake_case_ , **snake_case_ , )
        # add pixel values
        _snake_case : int = features.pop("pixel_values" )
        if return_overflowing_tokens is True:
            _snake_case : int = self.get_overflowing_images(snake_case_ , encoded_inputs["overflow_to_sample_mapping"] )
        _snake_case : Optional[int] = images
        return encoded_inputs
    def lowerCamelCase__ ( self , snake_case_ , snake_case_ ):
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        _snake_case : Tuple = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx] )
        if len(snake_case_ ) != len(snake_case_ ):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                F' {len(snake_case_ )} and {len(snake_case_ )}' )
        return images_with_overflow
    # Delegate batch decoding to the wrapped tokenizer.
    def lowerCamelCase__ ( self , *snake_case_ , **snake_case_ ):
        return self.tokenizer.batch_decode(*snake_case_ , **snake_case_ )
    # Delegate single-sequence decoding to the wrapped tokenizer.
    def lowerCamelCase__ ( self , *snake_case_ , **snake_case_ ):
        return self.tokenizer.decode(*snake_case_ , **snake_case_ )
    @property
    def lowerCamelCase__ ( self ):
        # Names of the model inputs this processor produces.
        return ["input_ids", "bbox", "attention_mask", "image"]
    @property
    def lowerCamelCase__ ( self ):
        # Deprecated alias kept for backward compatibility.
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , snake_case_ , )
        return self.image_processor_class
    @property
    def lowerCamelCase__ ( self ):
        # Deprecated alias kept for backward compatibility.
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , snake_case_ , )
        return self.image_processor
| 87 |
"""simple docstring"""
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision.transforms import functional as F
from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection
from transformers.utils import logging
logging.set_verbosity_info()
_a : Tuple = logging.get_logger(__name__)
# Fix: the conversion code later in this module logs through `logger`,
# which was never bound (the logger was assigned to `_a` only).
logger = _a

# here we list all keys to be renamed (original name on the left, our name on the right)
_a : int = []
# Fix: the loop below appends to `rename_keys`, which was never bound
# (the list was assigned to `_a` only).
rename_keys = _a
for i in range(6):
    # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
    rename_keys.append(
        (f'transformer.encoder.layers.{i}.self_attn.out_proj.weight', f'encoder.layers.{i}.self_attn.out_proj.weight')
    )
    rename_keys.append(
        (f'transformer.encoder.layers.{i}.self_attn.out_proj.bias', f'encoder.layers.{i}.self_attn.out_proj.bias')
    )
    rename_keys.append((f'transformer.encoder.layers.{i}.linear1.weight', f'encoder.layers.{i}.fc1.weight'))
    rename_keys.append((f'transformer.encoder.layers.{i}.linear1.bias', f'encoder.layers.{i}.fc1.bias'))
    rename_keys.append((f'transformer.encoder.layers.{i}.linear2.weight', f'encoder.layers.{i}.fc2.weight'))
    rename_keys.append((f'transformer.encoder.layers.{i}.linear2.bias', f'encoder.layers.{i}.fc2.bias'))
    rename_keys.append(
        (f'transformer.encoder.layers.{i}.norm1.weight', f'encoder.layers.{i}.self_attn_layer_norm.weight')
    )
    rename_keys.append((f'transformer.encoder.layers.{i}.norm1.bias', f'encoder.layers.{i}.self_attn_layer_norm.bias'))
    rename_keys.append((f'transformer.encoder.layers.{i}.norm2.weight', f'encoder.layers.{i}.final_layer_norm.weight'))
    rename_keys.append((f'transformer.encoder.layers.{i}.norm2.bias', f'encoder.layers.{i}.final_layer_norm.bias'))
    # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
    rename_keys.append(
        (f'transformer.decoder.layers.{i}.self_attn.out_proj.weight', f'decoder.layers.{i}.self_attn.out_proj.weight')
    )
    rename_keys.append(
        (f'transformer.decoder.layers.{i}.self_attn.out_proj.bias', f'decoder.layers.{i}.self_attn.out_proj.bias')
    )
    rename_keys.append(
        (
            f'transformer.decoder.layers.{i}.multihead_attn.out_proj.weight',
            f'decoder.layers.{i}.encoder_attn.out_proj.weight',
        )
    )
    rename_keys.append(
        (
            f'transformer.decoder.layers.{i}.multihead_attn.out_proj.bias',
            f'decoder.layers.{i}.encoder_attn.out_proj.bias',
        )
    )
    rename_keys.append((f'transformer.decoder.layers.{i}.linear1.weight', f'decoder.layers.{i}.fc1.weight'))
    rename_keys.append((f'transformer.decoder.layers.{i}.linear1.bias', f'decoder.layers.{i}.fc1.bias'))
    rename_keys.append((f'transformer.decoder.layers.{i}.linear2.weight', f'decoder.layers.{i}.fc2.weight'))
    rename_keys.append((f'transformer.decoder.layers.{i}.linear2.bias', f'decoder.layers.{i}.fc2.bias'))
    rename_keys.append(
        (f'transformer.decoder.layers.{i}.norm1.weight', f'decoder.layers.{i}.self_attn_layer_norm.weight')
    )
    rename_keys.append((f'transformer.decoder.layers.{i}.norm1.bias', f'decoder.layers.{i}.self_attn_layer_norm.bias'))
    rename_keys.append(
        (f'transformer.decoder.layers.{i}.norm2.weight', f'decoder.layers.{i}.encoder_attn_layer_norm.weight')
    )
    rename_keys.append(
        (f'transformer.decoder.layers.{i}.norm2.bias', f'decoder.layers.{i}.encoder_attn_layer_norm.bias')
    )
    rename_keys.append((f'transformer.decoder.layers.{i}.norm3.weight', f'decoder.layers.{i}.final_layer_norm.weight'))
    rename_keys.append((f'transformer.decoder.layers.{i}.norm3.bias', f'decoder.layers.{i}.final_layer_norm.bias'))
# convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads
rename_keys.extend(
    [
        ("""input_proj.weight""", """input_projection.weight"""),
        ("""input_proj.bias""", """input_projection.bias"""),
        ("""query_embed.weight""", """query_position_embeddings.weight"""),
        ("""transformer.encoder.norm.weight""", """encoder.layernorm.weight"""),
        ("""transformer.encoder.norm.bias""", """encoder.layernorm.bias"""),
        ("""transformer.decoder.norm.weight""", """decoder.layernorm.weight"""),
        ("""transformer.decoder.norm.bias""", """decoder.layernorm.bias"""),
        ("""class_embed.weight""", """class_labels_classifier.weight"""),
        ("""class_embed.bias""", """class_labels_classifier.bias"""),
        ("""bbox_embed.layers.0.weight""", """bbox_predictor.layers.0.weight"""),
        ("""bbox_embed.layers.0.bias""", """bbox_predictor.layers.0.bias"""),
        ("""bbox_embed.layers.1.weight""", """bbox_predictor.layers.1.weight"""),
        ("""bbox_embed.layers.1.bias""", """bbox_predictor.layers.1.bias"""),
        ("""bbox_embed.layers.2.weight""", """bbox_predictor.layers.2.weight"""),
        ("""bbox_embed.layers.2.bias""", """bbox_predictor.layers.2.bias"""),
    ]
)
def a__ ( state_dict , old , new ):
    """Rename one key of *state_dict* in place: pop *old*, re-insert under *new*.

    Fixes applied: the signature declared three parameters all named ``a``
    (a SyntaxError) and the body referenced the unbound names
    ``state_dict``/``val`` while discarding the popped value.
    """
    val = state_dict.pop(old )
    state_dict[new] = val
def a__ ( a : dict ):
    """Return a copy of the state dict *a* with backbone keys remapped.

    Keys containing ``backbone.0.body`` are rewritten to
    ``backbone.conv_encoder.model``; all other entries are copied verbatim.
    Fixes applied: the body iterated the unbound name ``state_dict`` and
    discarded values into ``_snake_case`` locals instead of inserting them
    into the new dict.
    """
    new_state_dict = OrderedDict()
    for key, value in a.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body" , "backbone.conv_encoder.model" )
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict
def a__ ( a : dict ):
    """Split fused attention projections of *a* (a state dict) in place.

    PyTorch's ``MultiHeadAttention`` stores one fused ``in_proj`` matrix/bias;
    this splits each into separate q/k/v projections (hidden size 256, so the
    fused tensors are 768 rows: query ``[:256]``, key ``[256:512]``,
    value ``[-256:]`` — the order stated by the original comments).

    Fixes applied: the body popped from the unbound name ``state_dict`` and
    discarded every q/k/v slice into ``_snake_case`` locals. Target key names
    follow the HF DETR-style attention layout (q_proj/k_proj/v_proj,
    encoder_attn for cross-attention) — NOTE(review): confirm against the
    upstream conversion script.
    """
    prefix = ""
    # first: transformer encoder
    for i in range(6 ):
        in_proj_weight = a.pop(f'{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight' )
        in_proj_bias = a.pop(f'{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias' )
        a[f'encoder.layers.{i}.self_attn.q_proj.weight'] = in_proj_weight[:256, :]
        a[f'encoder.layers.{i}.self_attn.q_proj.bias'] = in_proj_bias[:256]
        a[f'encoder.layers.{i}.self_attn.k_proj.weight'] = in_proj_weight[256:512, :]
        a[f'encoder.layers.{i}.self_attn.k_proj.bias'] = in_proj_bias[256:512]
        a[f'encoder.layers.{i}.self_attn.v_proj.weight'] = in_proj_weight[-256:, :]
        a[f'encoder.layers.{i}.self_attn.v_proj.bias'] = in_proj_bias[-256:]
    # next: transformer decoder (also includes cross-attention)
    for i in range(6 ):
        # self-attention
        in_proj_weight = a.pop(f'{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight' )
        in_proj_bias = a.pop(f'{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias' )
        a[f'decoder.layers.{i}.self_attn.q_proj.weight'] = in_proj_weight[:256, :]
        a[f'decoder.layers.{i}.self_attn.q_proj.bias'] = in_proj_bias[:256]
        a[f'decoder.layers.{i}.self_attn.k_proj.weight'] = in_proj_weight[256:512, :]
        a[f'decoder.layers.{i}.self_attn.k_proj.bias'] = in_proj_bias[256:512]
        a[f'decoder.layers.{i}.self_attn.v_proj.weight'] = in_proj_weight[-256:, :]
        a[f'decoder.layers.{i}.self_attn.v_proj.bias'] = in_proj_bias[-256:]
        # cross-attention
        in_proj_weight_cross_attn = a.pop(
            f'{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight' )
        in_proj_bias_cross_attn = a.pop(f'{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias' )
        a[f'decoder.layers.{i}.encoder_attn.q_proj.weight'] = in_proj_weight_cross_attn[:256, :]
        a[f'decoder.layers.{i}.encoder_attn.q_proj.bias'] = in_proj_bias_cross_attn[:256]
        a[f'decoder.layers.{i}.encoder_attn.k_proj.weight'] = in_proj_weight_cross_attn[256:512, :]
        a[f'decoder.layers.{i}.encoder_attn.k_proj.bias'] = in_proj_bias_cross_attn[256:512]
        a[f'decoder.layers.{i}.encoder_attn.v_proj.weight'] = in_proj_weight_cross_attn[-256:, :]
        a[f'decoder.layers.{i}.encoder_attn.v_proj.bias'] = in_proj_bias_cross_attn[-256:]
def a__ ( image , checkpoint_url : str ):
    """Resize *image* so its longer side hits the target used by the model.

    Detection checkpoints use a max side of 800 px, structure-recognition
    checkpoints 1000 px; the aspect ratio is preserved.

    Fixes applied: the signature declared two parameters both named ``a``
    (a SyntaxError) while the body read the unbound names
    ``image``/``checkpoint_url``.
    """
    width, height = image.size
    current_max_size = max(width , height )
    target_max_size = 800 if "detection" in checkpoint_url else 1_000
    scale = target_max_size / current_max_size
    resized_image = image.resize((int(round(scale * width ) ), int(round(scale * height ) )) )
    return resized_image
def a__ ( a ):
    """Convert PIL image *a* to a tensor and apply ImageNet normalization.

    Fix applied: the previous body converted the image with ``F.to_tensor``
    but then passed the *original PIL image* (not the resulting tensor) to
    ``F.normalize``, which would fail — normalize operates on the tensor.
    """
    tensor = F.to_tensor(a )
    # ImageNet channel means / stds.
    return F.normalize(tensor , mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] )
@torch.no_grad()
def a__ ( a : Optional[Any] , a : Any , a : Union[str, Any] ):
    """Convert a Table Transformer checkpoint to HF format, verify outputs,
    optionally save/push.

    NOTE(review): this function is heavily degraded and will not run as-is:
    every intermediate result is bound to the throwaway name ``_snake_case``
    while later lines read the original names (``state_dict``, ``config``,
    ``model``, ``image_processor``, ``expected_shape``, ...), which are
    unbound; the three parameters are all named ``a`` (a SyntaxError); and
    ``rename_key``/``rename_backbone_keys``/``read_in_q_k_v`` are called
    under names this module no longer defines. Left byte-identical; only
    comments added.
    """
    logger.info("Converting model..." )
    # load original state dict
    _snake_case : Tuple = torch.hub.load_state_dict_from_url(a , map_location="cpu" )
    # rename keys
    for src, dest in rename_keys:
        rename_key(a , a , a )
    _snake_case : Union[str, Any] = rename_backbone_keys(a )
    # query, key and value matrices need special treatment
    read_in_q_k_v(a )
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    _snake_case : int = "model."
    for key in state_dict.copy().keys():
        if not key.startswith("class_labels_classifier" ) and not key.startswith("bbox_predictor" ):
            _snake_case : Optional[int] = state_dict.pop(a )
            _snake_case : Any = val
    # create HuggingFace model and load state dict
    _snake_case : Tuple = TableTransformerConfig(
        backbone="resnet18" , mask_loss_coefficient=1 , dice_loss_coefficient=1 , ce_loss_coefficient=1 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , eos_coefficient=0.4 , class_cost=1 , bbox_cost=5 , giou_cost=2 , )
    if "detection" in checkpoint_url:
        # Detection model: 15 object queries, 2 classes.
        _snake_case : Any = 15
        _snake_case : int = 2
        _snake_case : Optional[Any] = {0: "table", 1: "table rotated"}
        _snake_case : Union[str, Any] = idalabel
        _snake_case : Union[str, Any] = {v: k for k, v in idalabel.items()}
    else:
        # Structure-recognition model: 125 queries, 6 classes.
        _snake_case : Any = 125
        _snake_case : Union[str, Any] = 6
        _snake_case : List[str] = {
            0: "table",
            1: "table column",
            2: "table row",
            3: "table column header",
            4: "table projected row header",
            5: "table spanning cell",
        }
        _snake_case : Any = idalabel
        _snake_case : Optional[int] = {v: k for k, v in idalabel.items()}
    _snake_case : Union[str, Any] = DetrImageProcessor(
        format="coco_detection" , max_size=800 if "detection" in checkpoint_url else 1_000 )
    _snake_case : str = TableTransformerForObjectDetection(a )
    model.load_state_dict(a )
    model.eval()
    # verify our conversion on a sample document image from the hub
    _snake_case : Optional[int] = "example_pdf.png" if "detection" in checkpoint_url else "example_table.png"
    _snake_case : Optional[Any] = hf_hub_download(repo_id="nielsr/example-pdf" , repo_type="dataset" , filename=a )
    _snake_case : Dict = Image.open(a ).convert("RGB" )
    _snake_case : Union[str, Any] = normalize(resize(a , a ) ).unsqueeze(0 )
    _snake_case : str = model(a )
    if "detection" in checkpoint_url:
        # Reference logits / boxes captured from the original implementation.
        _snake_case : int = (1, 15, 3)
        _snake_case : List[str] = torch.tensor(
            [[-6.7897, -16.9985, 6.7937], [-8.0186, -22.2192, 6.9677], [-7.3117, -21.0708, 7.4055]] )
        _snake_case : List[str] = torch.tensor([[0.4867, 0.1767, 0.6732], [0.6718, 0.4479, 0.3830], [0.4716, 0.1760, 0.6364]] )
    else:
        _snake_case : Union[str, Any] = (1, 125, 7)
        _snake_case : str = torch.tensor(
            [[-18.1430, -8.3214, 4.8274], [-18.4685, -7.1361, -4.2667], [-26.3693, -9.3429, -4.9962]] )
        _snake_case : Optional[Any] = torch.tensor([[0.4983, 0.5595, 0.9440], [0.4916, 0.6315, 0.5954], [0.6108, 0.8637, 0.1135]] )
    assert outputs.logits.shape == expected_shape
    assert torch.allclose(outputs.logits[0, :3, :3] , a , atol=1e-4 )
    assert torch.allclose(outputs.pred_boxes[0, :3, :3] , a , atol=1e-4 )
    print("Looks ok!" )
    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(f'Saving PyTorch model and image processor to {pytorch_dump_folder_path}...' )
        Path(a ).mkdir(exist_ok=a )
        model.save_pretrained(a )
        image_processor.save_pretrained(a )
    if push_to_hub:
        # Push model to HF hub
        logger.info("Pushing model to the hub..." )
        _snake_case : int = (
            "microsoft/table-transformer-detection"
            if "detection" in checkpoint_url
            else "microsoft/table-transformer-structure-recognition"
        )
        model.push_to_hub(a )
        image_processor.push_to_hub(a )
if __name__ == "__main__":
_a : Tuple = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_url""",
default="""https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth""",
type=str,
choices=[
"""https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth""",
"""https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth""",
],
help="""URL of the Table Transformer checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
_a : Any = parser.parse_args()
convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 87 | 1 |
"""simple docstring"""
def a__ ( a : int = 100 ):
    """Project Euler 6: difference between the square of the sum and the
    sum of the squares of the first *a* natural numbers.

    Uses the closed forms sum(1..a) = a(a+1)/2 and
    sum(1..a of k^2) = a(a+1)(2a+1)/6.
    """
    square_of_sum = (a * (a + 1) // 2) ** 2
    sum_of_squares = a * (a + 1) * (2 * a + 1) // 6
    return square_of_sum - sum_of_squares


if __name__ == "__main__":
    # Fix: the guard previously printed `solution()`, a name not defined in
    # this module (the function above is `a__`), raising NameError.
    print(f'{a__() = }')
| 87 |
"""simple docstring"""
from ...utils import (
    OptionalDependencyNotAvailable,
    is_torch_available,
    is_transformers_available,
    is_transformers_version,
)
# The UnCLIP pipelines need torch and transformers >= 4.25.0; when either is
# missing, fall back to the dummy placeholder objects so imports still succeed.
try:
    if not (is_transformers_available() and is_torch_available() and is_transformers_version(""">=""", """4.25.0""")):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
    from .pipeline_unclip import UnCLIPPipeline
    from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
    from .text_proj import UnCLIPTextProjModel
| 87 | 1 |
"""simple docstring"""
def a__ ( a : int = 1_000 ):
    """Project Euler 1: sum of all natural numbers below *a* that are
    multiples of 3 or 5.

    Fixes applied: the previous loop compared against an unbound name ``n``
    while the counter shadowed the parameter ``a``; it also carried an
    unreachable ``elif x % 15 == 0: result -= x`` branch (any multiple of 15
    is a multiple of 3, so the first branch always won), which is dropped.
    """
    total = 0
    # 1 and 2 are never multiples of 3 or 5, so starting at 3 is equivalent
    # to scanning from 1.
    for candidate in range(3 , a ):
        if candidate % 3 == 0 or candidate % 5 == 0:
            total += candidate
    return total


if __name__ == "__main__":
    # Fix: the guard previously printed `solution()`, a name not defined in
    # this module (the function above is `a__`), raising NameError.
    print(f'{a__() = }')
| 87 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
# Module-level logger for this configuration module.
_a : Optional[int] = logging.get_logger(__name__)
# NOTE(review): this mapping rebinds `_a`, shadowing the logger above —
# presumably it was a separately-named pretrained-config archive map; confirm.
_a : List[str] = {
    """facebook/convnextv2-tiny-1k-224""": """https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json""",
}
class _UpperCAmelCase ( BackboneConfigMixin , PretrainedConfig):
    """
    Configuration for a ConvNeXt-V2 style backbone model.

    Fixes applied: both base classes were the undefined name `_snake_case`
    (``BackboneConfigMixin`` and ``PretrainedConfig`` are imported at the top
    of this module); the ``__init__`` signature repeated ``snake_case_`` for
    every parameter (a SyntaxError); and the attribute assignments were bound
    to local ``_snake_case`` names instead of ``self``. Parameter names are
    taken from the names the original body read on the right-hand side.
    """

    # NOTE(review): presumably this was `model_type` originally; confirm.
    __lowercase : List[Any] = """convnextv2"""

    def __init__( self , num_channels=3 , patch_size=4 , num_stages=4 , hidden_sizes=None , depths=None , hidden_act="gelu" , initializer_range=0.02 , layer_norm_eps=1E-12 , drop_path_rate=0.0 , image_size=2_24 , out_features=None , out_indices=None , **kwargs , ):
        """Store the hyper-parameters and derive the backbone stage layout."""
        super().__init__(**kwargs )
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_stages = num_stages
        self.hidden_sizes = [96, 1_92, 3_84, 7_68] if hidden_sizes is None else hidden_sizes
        self.depths = [3, 3, 9, 3] if depths is None else depths
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.drop_path_rate = drop_path_rate
        self.image_size = image_size
        self.stage_names = ["stem"] + [F'stage{idx}' for idx in range(1 , len(self.depths ) + 1 )]
        # NOTE(review): the targets of this unpacking were lost in the rename;
        # `_out_features`/`_out_indices` follow the BackboneConfigMixin
        # convention — confirm.
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features , out_indices=out_indices , stage_names=self.stage_names )
| 87 | 1 |
"""simple docstring"""
import torch
def a__ ( ):
    """Print how many CUDA devices torch can see (0 when CUDA is unavailable)."""
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
    else:
        num_gpus = 0
    print(f'Successfully ran on {num_gpus} GPUs' )


if __name__ == "__main__":
    # Fix: the guard previously called `main()`, a name not defined in this
    # module (the function above is `a__`), raising NameError.
    a__()
| 87 |
"""simple docstring"""
from argparse import ArgumentParser, Namespace
from ..utils import logging
from . import BaseTransformersCLICommand
def a__ ( a : Namespace ):
    """Build the `convert` sub-command object from a parsed CLI namespace.

    Fixes applied: the body constructed an undefined name ``ConvertCommand``
    (the command class in this module is ``_UpperCAmelCase``) and read the
    attributes off an unbound name ``args`` instead of the parameter ``a``.
    """
    return _UpperCAmelCase(
        a.model_type , a.tf_checkpoint , a.pytorch_dump_output , a.config , a.finetuning_task_name )


# Error text raised when a TF->PyTorch conversion is requested without
# TensorFlow installed.
_a : int = """
transformers can only be used from the commandline to convert TensorFlow models in PyTorch, In that case, it requires
TensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.
"""
class _UpperCAmelCase ( _snake_case):
    """
    `transformers convert` CLI command: dispatches on model type and runs the
    matching original-TF-checkpoint-to-PyTorch conversion script.

    NOTE(review): degraded — the base class `_snake_case` is undefined
    (presumably BaseTransformersCLICommand, imported above; confirm); the
    `__init__` signature repeats `snake_case_` (a SyntaxError) while its body
    reads the original parameter names; and several `raise
    ImportError(snake_case_)` statements reference an unbound name
    (presumably the module-level import-error message). Left byte-identical;
    only comments added.
    """
    @staticmethod
    def lowerCamelCase__ ( snake_case_ ):
        # Registers the `convert` sub-parser and its arguments.
        # NOTE(review): binds the sub-parser to `_snake_case` but the
        # following lines read the unbound names `parser`/`train_parser`.
        _snake_case : Dict = parser.add_parser(
            "convert" , help="CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints." , )
        train_parser.add_argument("--model_type" , type=snake_case_ , required=snake_case_ , help="Model's type." )
        train_parser.add_argument(
            "--tf_checkpoint" , type=snake_case_ , required=snake_case_ , help="TensorFlow checkpoint path or folder." )
        train_parser.add_argument(
            "--pytorch_dump_output" , type=snake_case_ , required=snake_case_ , help="Path to the PyTorch saved model output." )
        train_parser.add_argument("--config" , type=snake_case_ , default="" , help="Configuration file path or folder." )
        train_parser.add_argument(
            "--finetuning_task_name" , type=snake_case_ , default=snake_case_ , help="Optional fine-tuning task name if the TF model was a finetuned model." , )
        train_parser.set_defaults(func=snake_case_ )
    # NOTE(review): duplicate `snake_case_` parameters -> SyntaxError; body
    # reads model_type/tf_checkpoint/pytorch_dump_output/config/
    # finetuning_task_name, which are unbound here.
    def __init__( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , *snake_case_ , ):
        _snake_case : str = logging.get_logger("transformers-cli/converting" )
        self._logger.info(F'Loading model {model_type}' )
        _snake_case : Optional[int] = model_type
        _snake_case : Any = tf_checkpoint
        _snake_case : Optional[int] = pytorch_dump_output
        _snake_case : Tuple = config
        _snake_case : Tuple = finetuning_task_name
    def lowerCamelCase__ ( self ):
        # Dispatch: each supported model type lazily imports its conversion
        # helper so TensorFlow is only required when actually converting.
        if self._model_type == "albert":
            try:
                from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import (
                    convert_tf_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(snake_case_ )
            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
        elif self._model_type == "bert":
            try:
                from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import (
                    convert_tf_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(snake_case_ )
            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
        elif self._model_type == "funnel":
            try:
                from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import (
                    convert_tf_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(snake_case_ )
            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
        elif self._model_type == "t5":
            try:
                from ..models.ta.convert_ta_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
            except ImportError:
                raise ImportError(snake_case_ )
            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
        elif self._model_type == "gpt":
            from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import (
                convert_openai_checkpoint_to_pytorch,
            )
            convert_openai_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
        elif self._model_type == "transfo_xl":
            try:
                from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import (
                    convert_transfo_xl_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(snake_case_ )
            # transfo_xl accepts either a raw TF checkpoint or a dataset file.
            if "ckpt" in self._tf_checkpoint.lower():
                _snake_case : int = self._tf_checkpoint
                _snake_case : Optional[Any] = ""
            else:
                _snake_case : Optional[int] = self._tf_checkpoint
                _snake_case : List[str] = ""
            convert_transfo_xl_checkpoint_to_pytorch(
                snake_case_ , self._config , self._pytorch_dump_output , snake_case_ )
        elif self._model_type == "gpt2":
            try:
                from ..models.gpta.convert_gpta_original_tf_checkpoint_to_pytorch import (
                    convert_gpta_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(snake_case_ )
            convert_gpta_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
        elif self._model_type == "xlnet":
            try:
                from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import (
                    convert_xlnet_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(snake_case_ )
            convert_xlnet_checkpoint_to_pytorch(
                self._tf_checkpoint , self._config , self._pytorch_dump_output , self._finetuning_task_name )
        elif self._model_type == "xlm":
            from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import (
                convert_xlm_checkpoint_to_pytorch,
            )
            convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
        elif self._model_type == "lxmert":
            from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import (
                convert_lxmert_checkpoint_to_pytorch,
            )
            convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
        elif self._model_type == "rembert":
            from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import (
                convert_rembert_tf_checkpoint_to_pytorch,
            )
            convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
        else:
            raise ValueError(
                "--model_type should be selected in the list [bert, gpt, gpt2, t5, transfo_xl, xlnet, xlm, lxmert]" )
| 87 | 1 |
"""simple docstring"""
from __future__ import annotations
import requests
def a__ ( a : str ):
    """Fetch a single Hacker News item by id *a* via the Firebase API.

    Fix applied: the URL f-string interpolated the unbound name ``story_id``;
    the parameter is ``a``.
    """
    url = f'https://hacker-news.firebaseio.com/v0/item/{a}.json?print=pretty'
    return requests.get(url ).json()
def a__ ( a : int = 10 ):
    """Fetch the first *a* top Hacker News stories.

    NOTE(review): degraded — the slice uses the unbound name ``max_stories``
    (the parameter is ``a``), the fetched ids are bound to ``_snake_case``
    while the comprehension reads the unbound name ``story_ids``, and
    ``get_hackernews_story`` is not defined in this module (the per-story
    fetcher above is named ``a__`` and is shadowed). Left byte-identical.
    """
    _snake_case : str = "https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty"
    _snake_case : Any = requests.get(a ).json()[:max_stories]
    return [get_hackernews_story(a ) for story_id in story_ids]
def a__ ( a : int = 10 ):
    """Render the top *a* Hacker News stories as a markdown bullet list.

    NOTE(review): degraded — ``hackernews_top_stories`` is not defined in
    this module (the fetcher above is named ``a__``), and the format call
    unpacks ``**a`` (the int parameter) where it presumably should unpack the
    per-iteration ``story`` dict. Left byte-identical.
    """
    _snake_case : Optional[Any] = hackernews_top_stories(a )
    return "\n".join("* [{title}]({url})".format(**a ) for story in stories )


if __name__ == "__main__":
    # NOTE(review): `hackernews_top_stories_as_markdown` is not defined in
    # this module either; the functions above are all named `a__`.
    print(hackernews_top_stories_as_markdown())
| 87 |
"""simple docstring"""
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def a__ ( flax_key_tuple , flax_tensor ):
    """Adapt one Flax parameter (key tuple + tensor) to the PyTorch layout.

    - expert layers (3-D ``kernel``): renamed to ``weight`` and permuted from
      (experts, in, out) to (experts, out, in);
    - linear layers (``kernel``): renamed to ``weight`` and transposed;
    - ``scale``/``embedding`` leaves: renamed to ``weight`` unchanged.

    Fixes applied: the signature declared two parameters both named ``a``
    (a SyntaxError) while the body read the unbound names
    ``flax_key_tuple``/``flax_tensor``.
    """
    if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
        # expert layer
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = torch.permute(flax_tensor , (0, 2, 1) )
    elif flax_key_tuple[-1] == "kernel" and ".".join(flax_key_tuple ):
        # linear layer
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = flax_tensor.T
    elif flax_key_tuple[-1] in ["scale", "embedding"]:
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
    return flax_key_tuple, flax_tensor
def a__ ( layer , checkpoint_info , switch_checkpoint_path ):
    """Resolve one tensorstore checkpoint entry.

    Returns ``(curr_real_layer_name, split_layer, content)`` where
    ``split_layer`` is the per-layer sub-path (as tuple(s)) and ``content``
    is the stored value, with ``kvstore/path`` entries rebased onto
    *switch_checkpoint_path* and ``kvstore/driver`` forced to ``"file"``.

    Fixes applied: the signature declared three parameters all named ``a``
    (a SyntaxError) while the body read the unbound names ``layer``/
    ``checkpoint_info``/``switch_checkpoint_path``, and every local binding
    had been lost to ``_snake_case``.
    """
    if "metadata" in layer:
        split_layer = layer.split("metadata" )
        curr_real_layer_name = "".join(split_layer[0] )[:-1]
        split_layer = [tuple(("metadata" + split_layer[1]).split("/" ) )]
    elif "kvstore" in layer:
        split_layer = layer.split("kvstore" )
        curr_real_layer_name = "".join(split_layer[0] )[:-1]
        split_layer = [tuple(("kvstore" + split_layer[1]).split("/" ) )]
    else:
        split_layer = layer.split("/" )
        curr_real_layer_name = "/".join(split_layer[:-1] )
        split_layer = (split_layer[-1],)
    if "kvstore/path" in layer:
        content = f'{switch_checkpoint_path}/{checkpoint_info[layer]}'
    elif "kvstore/driver" in layer:
        content = "file"
    else:
        content = checkpoint_info[layer]
    return curr_real_layer_name, split_layer, content
def a__ ( current_block , save_path ):
    """Rename the keys of one weight shard and serialize it with torch.save.

    Fixes applied: the signature declared two parameters both named ``a``
    (a SyntaxError) while the body read the unbound name ``current_block``,
    and the dict-insertion target inside the loop had been lost.
    """
    current_block = rename_keys(current_block )
    new_current_block = {}
    for k, v in current_block.items():
        # NOTE(review): the key transform was lost in the rename; rewriting
        # "/"-separated Flax paths to "."-separated PyTorch keys follows the
        # upstream conversion script — confirm.
        new_current_block[k.replace("/" , "." )] = v
    current_block = new_current_block
    torch.save(current_block , save_path )
def a__ ( a : Dict , a : Tuple , a : List[str] , a : int , a : str = WEIGHTS_NAME ):
"""simple docstring"""
_snake_case : Any = convert_file_size_to_int(a )
_snake_case : Tuple = []
_snake_case : Optional[int] = {}
_snake_case : Tuple = 0
_snake_case : Optional[Any] = 0
os.makedirs(a , exist_ok=a )
with gfile.GFile(switch_checkpoint_path + "/checkpoint" , "rb" ) as fp:
_snake_case : Any = serialization.msgpack_restore(fp.read() )["optimizer"]["target"]
_snake_case : Optional[Any] = flatten_dict(a , sep="/" )
_snake_case : Optional[Any] = {}
for layer in checkpoint_info.keys():
_snake_case , _snake_case , _snake_case : int = get_key_and_tensorstore_dict(
a , a , a )
if curr_real_layer_name in all_layers:
_snake_case : Dict = content
else:
_snake_case : Tuple = {split_layer[-1]: content}
for key in all_layers.keys():
# open tensorstore file
_snake_case : List[str] = ts.open(unflatten_dict(all_layers[key] ) ).result().read().result()
_snake_case : Dict = torch.tensor(a )
_snake_case : Dict = raw_weights.numel() * dtype_byte_size(raw_weights.dtype )
# use the renaming pattern from the small conversion scripts
_snake_case , _snake_case : Optional[int] = rename_base_flax_keys(tuple(key.split("/" ) ) , a )
_snake_case : Optional[Any] = "/".join(a )
# If this weight is going to tip up over the maximal size, we split.
if current_block_size + weight_size > max_shard_size:
_snake_case : Any = os.path.join(
a , weights_name.replace(".bin" , f'-{len(a )+1:05d}-of-???.bin' ) )
rename_and_save_block(a , a )
sharded_state_dicts.append(current_block.keys() )
del current_block
_snake_case : List[Any] = {}
_snake_case : str = 0
_snake_case : List[str] = raw_weights.to(getattr(a , a ) )
current_block_size += weight_size
total_size += weight_size
# Add the last block
_snake_case : int = os.path.join(a , weights_name.replace(".bin" , f'-{len(a )+1:05d}-of-???.bin' ) )
rename_and_save_block(a , a )
sharded_state_dicts.append(current_block.keys() )
# If we only have one shard, we return it
if len(a ) == 1:
return {weights_name: sharded_state_dicts[0]}, None
# Otherwise, let's build the index
_snake_case : str = {}
_snake_case : Any = {}
for idx, shard in enumerate(a ):
_snake_case : Optional[int] = weights_name.replace(
".bin" , f'-{idx+1:05d}-of-{len(a ):05d}.bin' ) # len(sharded_state_dicts):05d}
_snake_case : Dict = os.path.join(a , weights_name.replace(".bin" , f'-{idx+1:05d}-of-???.bin' ) )
os.rename(a , os.path.join(a , a ) )
_snake_case : Dict = shard
for key in shard:
_snake_case : int = shard_file
# Add the metadata
_snake_case : List[Any] = {"total_size": total_size}
_snake_case : Any = {"metadata": metadata, "weight_map": weight_map}
with open(os.path.join(a , a ) , "w" , encoding="utf-8" ) as f:
_snake_case : Union[str, Any] = json.dumps(a , indent=2 , sort_keys=a ) + "\n"
f.write(a )
return metadata, index
if __name__ == "__main__":
_a : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--switch_t5x_checkpoint_path""",
default="""/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600""",
type=str,
required=False,
help="""Path to a directory containing a folder per layer. Follows the original Google format.""",
)
parser.add_argument("""--max_shard_size""", default="""10GB""", required=False, help="""Max shard size""")
parser.add_argument("""--dtype""", default="""bfloat16""", type=str, required=False, help="""dtype of the saved model""")
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted""",
type=str,
required=False,
help="""Path to the output pytorch model.""",
)
_a : Optional[int] = parser.parse_args()
shard_on_the_fly(
args.switch_tax_checkpoint_path,
args.pytorch_dump_folder_path,
args.max_shard_size,
args.dtype,
)
def a__ ( ):
"""simple docstring"""
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, TaTokenizer
_snake_case : List[str] = SwitchTransformersConfig.from_pretrained("google/switch-base-8" )
config.save_pretrained("/home/arthur_huggingface_co/transformers/switch_converted" )
_snake_case : str = SwitchTransformersForConditionalGeneration.from_pretrained(
"/home/arthur_huggingface_co/transformers/switch_converted" , device_map="auto" )
_snake_case : List[Any] = TaTokenizer.from_pretrained("t5-small" )
_snake_case : Optional[Any] = "A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>."
_snake_case : Dict = tokenizer(a , return_tensors="pt" ).input_ids
_snake_case : List[Any] = model.generate(a , decoder_start_token_id=0 )
print(tokenizer.decode(out[0] ) )
| 87 | 1 |
"""simple docstring"""
import argparse
import json
import os
import re
import torch
from transformers import BloomConfig, BloomModel
from transformers.file_utils import CONFIG_NAME, WEIGHTS_NAME
from transformers.utils import logging
logging.set_verbosity_info()
_a : Union[str, Any] = [
"""word_embeddings_layernorm.weight""",
"""word_embeddings_layernorm.bias""",
"""input_layernorm.weight""",
"""input_layernorm.bias""",
"""post_attention_layernorm.weight""",
"""post_attention_layernorm.bias""",
"""self_attention.dense.bias""",
"""mlp.dense_4h_to_h.bias""",
"""ln_f.weight""",
"""ln_f.bias""",
]
_a : str = [
"""mlp.dense_4h_to_h.weight""",
"""self_attention.dense.weight""",
]
def a__ ( a : Union[str, Any] , a : str ):
"""simple docstring"""
_snake_case : Any = {
"word_embeddings.weight": "word_embeddings.weight",
"word_embeddings.norm.weight": "word_embeddings_layernorm.weight",
"word_embeddings.norm.bias": "word_embeddings_layernorm.bias",
"weight": "ln_f.weight",
"bias": "ln_f.bias",
}
if key in layer_rename_map:
return layer_rename_map[key]
# Handle transformer blocks
_snake_case : Optional[Any] = int(re.match(R".*layer_(\d*).*" , a )[1] )
layer_number -= 3
return f'h.{layer_number}.' + key
def a__ ( a : Union[str, Any] ):
"""simple docstring"""
if dtype == torch.bool:
return 1 / 8
_snake_case : str = re.search(R"[^\d](\d+)$" , str(a ) )
if bit_search is None:
raise ValueError(f'`dtype` is not a valid dtype: {dtype}.' )
_snake_case : Union[str, Any] = int(bit_search.groups()[0] )
return bit_size // 8
def a__ ( a : Optional[Any] , a : List[str] , a : Any , a : Optional[int] , a : Any ):
"""simple docstring"""
if bloom_config_file == "":
_snake_case : str = BloomConfig()
else:
_snake_case : Dict = BloomConfig.from_json_file(a )
if shard_model:
_snake_case : Dict = os.listdir(a )
_snake_case : Any = sorted(filter(lambda a : s.startswith("layer" ) and "model_00" in s , a ) )
_snake_case : Optional[int] = {"weight_map": {}, "metadata": {}}
_snake_case : Optional[int] = 0
_snake_case : str = None
_snake_case : Optional[Any] = BloomConfig()
for j, file in enumerate(a ):
print("Processing file: {}".format(a ) )
_snake_case : Optional[int] = None
for i in range(a ):
# load all TP files
_snake_case : List[Any] = file.replace("model_00" , f'model_0{i}' )
_snake_case : Tuple = torch.load(os.path.join(a , a ) , map_location="cpu" )
# Rename keys in the transformers names
_snake_case : Optional[int] = list(temp.keys() )
for key in keys:
_snake_case : Optional[Any] = temp.pop(a )
if tensors is None:
_snake_case : Any = temp
else:
for key in tensors.keys():
if any(key.endswith(a ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
# We average (sum and then divide) some weights accross TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
tensors[key] += temp[key]
else:
# Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
_snake_case : Dict = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0
# We concatenate these weights accross TP ranks
_snake_case : Any = torch.cat([tensors[key], temp[key]] , dim=a )
# Divide by the number of TP the weights we want to average
for key in tensors.keys():
if any(key.endswith(a ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
_snake_case : List[str] = tensors[key] / pretraining_tp
torch.save(
a , os.path.join(
a , "pytorch_model_{}-of-{}.bin".format(str(j + 1 ).zfill(5 ) , str(len(a ) ).zfill(5 ) ) , ) , )
for key in tensors.keys():
_snake_case : Dict = tensors[key]
total_size += value.numel() * get_dtype_size(value.dtype )
if key not in index_dict["weight_map"]:
_snake_case : List[str] = "pytorch_model_{}-of-{}.bin".format(
str(j + 1 ).zfill(5 ) , str(len(a ) ).zfill(5 ) )
_snake_case : Optional[Any] = BloomConfig()
_snake_case : Tuple = pytorch_dump_folder_path + "/" + CONFIG_NAME
_snake_case : Optional[int] = total_size
with open(a , "w" , encoding="utf-8" ) as f:
f.write(config.to_json_string() )
with open(os.path.join(a , WEIGHTS_NAME + ".index.json" ) , "w" , encoding="utf-8" ) as f:
_snake_case : Union[str, Any] = json.dumps(a , indent=2 , sort_keys=a ) + "\n"
f.write(a )
else:
_snake_case : List[str] = BloomModel(a )
_snake_case : Optional[int] = os.listdir(a )
_snake_case : int = sorted(filter(lambda a : s.startswith("layer" ) and "model_00" in s , a ) )
_snake_case : Tuple = None
for i, file in enumerate(a ):
_snake_case : Optional[Any] = None
for i in range(a ):
# load all TP files
_snake_case : Tuple = file.replace("model_00" , f'model_0{i}' )
_snake_case : Union[str, Any] = torch.load(os.path.join(a , a ) , map_location="cpu" )
# Rename keys in the transformers names
_snake_case : Optional[Any] = list(temp.keys() )
for key in keys:
_snake_case : List[Any] = temp.pop(a )
if tensors is None:
_snake_case : Optional[Any] = temp
else:
for key in tensors.keys():
# We average (sum and then divide) some weights accross TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
if any(key.endswith(a ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
tensors[key] += temp[key]
else:
# Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
_snake_case : str = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0
# We concatenate these weights accross TP ranks
_snake_case : Any = torch.cat([tensors[key], temp[key]] , dim=a )
# Divide by the number of TP the weights we want to average
for key in tensors.keys():
if any(key.endswith(a ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
_snake_case : Dict = tensors[key] / pretraining_tp
_snake_case : Dict = model.load_state_dict(a , strict=a )
assert not other_keys.unexpected_keys, f'The keys {other_keys.unexpected_keys} are unexpected'
if missing_keys is None:
_snake_case : Optional[Any] = set(other_keys.missing_keys )
else:
_snake_case : str = missing_keys.intersection(set(other_keys.missing_keys ) )
assert not missing_keys, f'The keys {missing_keys} are missing'
# Save pytorch-model
os.makedirs(a , exist_ok=a )
_snake_case : Tuple = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
_snake_case : int = pytorch_dump_folder_path + "/" + CONFIG_NAME
print(f'Save PyTorch model to {pytorch_weights_dump_path} with dtype {config.torch_dtype}' )
if config.torch_dtype is not None:
_snake_case : List[Any] = model.to(config.torch_dtype )
torch.save(model.state_dict() , a )
print(f'Save configuration file to {pytorch_config_dump_path}' )
with open(a , "w" , encoding="utf-8" ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
_a : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--bloom_checkpoint_path""",
default=None,
type=str,
required=True,
help="""Path to the Megatron-LM checkpoint path.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--bloom_config_file""",
default="""""",
type=str,
help=(
"""An optional config json file corresponding to the pre-trained model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--shard_model""",
action="""store_true""",
help="""An optional setting to shard the output model \nThis enables sharding the converted checkpoint""",
)
parser.add_argument(
"""--pretraining_tp""",
default=4,
type=int,
help="""Pretraining TP rank that has been used when training the model in Megatron-LM \n""",
)
_a : Dict = parser.parse_args()
convert_bloom_checkpoint_to_pytorch(
args.bloom_checkpoint_path,
args.bloom_config_file,
args.pytorch_dump_folder_path,
args.shard_model,
args.pretraining_tp,
)
| 87 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import MobileBertConfig, is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_MODEL_FOR_PRETRAINING_MAPPING,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertModel,
)
@require_tf
class _UpperCAmelCase ( _snake_case , _snake_case , unittest.TestCase):
    """Common test suite for the TensorFlow MobileBERT model family.

    NOTE(review): identifiers such as ``_UpperCAmelCase`` / ``_snake_case`` are
    obfuscated placeholders, and several body names do not resolve to anything
    assigned in scope (e.g. ``return_labels``, ``inputs_dict``,
    ``config_and_inputs``) — the code is kept byte-identical and only
    annotated here; confirm against the original upstream file before running.
    """

    # Model classes exercised by the shared mixin tests (empty when TF is missing).
    __lowercase : Dict = (
        (
            TFMobileBertModel,
            TFMobileBertForMaskedLM,
            TFMobileBertForNextSentencePrediction,
            TFMobileBertForPreTraining,
            TFMobileBertForQuestionAnswering,
            TFMobileBertForSequenceClassification,
            TFMobileBertForTokenClassification,
            TFMobileBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    # Pipeline-task name -> model class mapping used by the pipeline tests.
    __lowercase : Optional[Any] = (
        {
            """feature-extraction""": TFMobileBertModel,
            """fill-mask""": TFMobileBertForMaskedLM,
            """question-answering""": TFMobileBertForQuestionAnswering,
            """text-classification""": TFMobileBertForSequenceClassification,
            """token-classification""": TFMobileBertForTokenClassification,
            """zero-shot""": TFMobileBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    __lowercase : Union[str, Any] = False
    __lowercase : Optional[int] = False

    def lowerCamelCase__ ( self , snake_case_ , snake_case_ , snake_case_=False ):
        # Extend the mixin's input preparation: pretraining-style heads get a
        # zero-filled dummy label tensor when labels are requested.
        # NOTE(review): ``tf.intaa`` is presumably a mangled ``tf.int32`` — confirm.
        _snake_case : Union[str, Any] = super()._prepare_for_class(snake_case_ , snake_case_ , return_labels=snake_case_ )
        if return_labels:
            if model_class in get_values(snake_case_ ):
                _snake_case : Any = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
        return inputs_dict

    class _UpperCAmelCase ( _snake_case):
        """Helper that builds tiny configs/dummy inputs and checks each model head."""

        def __init__( self , snake_case_ , snake_case_=13 , snake_case_=7 , snake_case_=True , snake_case_=True , snake_case_=True , snake_case_=True , snake_case_=99 , snake_case_=32 , snake_case_=32 , snake_case_=2 , snake_case_=4 , snake_case_=37 , snake_case_="gelu" , snake_case_=0.1 , snake_case_=0.1 , snake_case_=5_12 , snake_case_=16 , snake_case_=2 , snake_case_=0.02 , snake_case_=3 , snake_case_=4 , snake_case_=None , ):
            # Store every hyperparameter used to build the test config/inputs.
            _snake_case : Optional[Any] = parent
            _snake_case : List[Any] = batch_size
            _snake_case : Optional[int] = seq_length
            _snake_case : Dict = is_training
            _snake_case : Union[str, Any] = use_input_mask
            _snake_case : List[Any] = use_token_type_ids
            _snake_case : int = use_labels
            _snake_case : Dict = vocab_size
            _snake_case : Tuple = hidden_size
            _snake_case : Optional[int] = num_hidden_layers
            _snake_case : List[str] = num_attention_heads
            _snake_case : Optional[Any] = intermediate_size
            _snake_case : Dict = hidden_act
            _snake_case : Tuple = hidden_dropout_prob
            _snake_case : Union[str, Any] = attention_probs_dropout_prob
            _snake_case : str = max_position_embeddings
            _snake_case : str = type_vocab_size
            _snake_case : Any = type_sequence_label_size
            _snake_case : Optional[int] = initializer_range
            _snake_case : List[Any] = num_labels
            _snake_case : Optional[int] = num_choices
            _snake_case : Optional[int] = scope
            _snake_case : Any = embedding_size

        def lowerCamelCase__ ( self ):
            # Build random ids/masks/labels plus a small MobileBertConfig.
            _snake_case : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
            _snake_case : Optional[Any] = None
            if self.use_input_mask:
                _snake_case : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] )
            _snake_case : List[str] = None
            if self.use_token_type_ids:
                _snake_case : Any = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
            _snake_case : Dict = None
            _snake_case : Tuple = None
            _snake_case : str = None
            if self.use_labels:
                _snake_case : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
                _snake_case : Any = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
                _snake_case : Optional[int] = ids_tensor([self.batch_size] , self.num_choices )
            _snake_case : Tuple = MobileBertConfig(
                vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , embedding_size=self.embedding_size , )
            return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

        def lowerCamelCase__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ):
            # Base model: check last_hidden_state and pooler_output shapes for
            # dict, list and positional input formats.
            _snake_case : Dict = TFMobileBertModel(config=snake_case_ )
            _snake_case : Optional[Any] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            _snake_case : Optional[int] = model(snake_case_ )
            _snake_case : Union[str, Any] = [input_ids, input_mask]
            _snake_case : Optional[Any] = model(snake_case_ )
            _snake_case : Dict = model(snake_case_ )
            self.parent.assertEqual(
                result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
            self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )

        def lowerCamelCase__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ):
            # Masked-LM head: logits over the vocabulary for every position.
            _snake_case : List[Any] = TFMobileBertForMaskedLM(config=snake_case_ )
            _snake_case : Tuple = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            _snake_case : List[str] = model(snake_case_ )
            self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

        def lowerCamelCase__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ):
            # Next-sentence prediction: two-way logits per example.
            _snake_case : Union[str, Any] = TFMobileBertForNextSentencePrediction(config=snake_case_ )
            _snake_case : Dict = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            _snake_case : Tuple = model(snake_case_ )
            self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )

        def lowerCamelCase__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ):
            # Pretraining head: MLM logits plus sequence-relationship logits.
            _snake_case : str = TFMobileBertForPreTraining(config=snake_case_ )
            _snake_case : Union[str, Any] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            _snake_case : List[Any] = model(snake_case_ )
            self.parent.assertEqual(
                result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
            self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )

        def lowerCamelCase__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ):
            # Sequence classification: one logit per label class.
            _snake_case : str = self.num_labels
            _snake_case : str = TFMobileBertForSequenceClassification(config=snake_case_ )
            _snake_case : Union[str, Any] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            _snake_case : Optional[int] = model(snake_case_ )
            self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

        def lowerCamelCase__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ):
            # Multiple choice: inputs tiled across the choice dimension first.
            _snake_case : Any = self.num_choices
            _snake_case : Tuple = TFMobileBertForMultipleChoice(config=snake_case_ )
            _snake_case : List[Any] = tf.tile(tf.expand_dims(snake_case_ , 1 ) , (1, self.num_choices, 1) )
            _snake_case : List[str] = tf.tile(tf.expand_dims(snake_case_ , 1 ) , (1, self.num_choices, 1) )
            _snake_case : Tuple = tf.tile(tf.expand_dims(snake_case_ , 1 ) , (1, self.num_choices, 1) )
            _snake_case : int = {
                "input_ids": multiple_choice_inputs_ids,
                "attention_mask": multiple_choice_input_mask,
                "token_type_ids": multiple_choice_token_type_ids,
            }
            _snake_case : Optional[Any] = model(snake_case_ )
            self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )

        def lowerCamelCase__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ):
            # Token classification: per-token label logits.
            _snake_case : Union[str, Any] = self.num_labels
            _snake_case : Optional[int] = TFMobileBertForTokenClassification(config=snake_case_ )
            _snake_case : List[Any] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            _snake_case : List[Any] = model(snake_case_ )
            self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )

        def lowerCamelCase__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ):
            # Question answering: start/end span logits per token.
            _snake_case : int = TFMobileBertForQuestionAnswering(config=snake_case_ )
            _snake_case : str = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            _snake_case : Union[str, Any] = model(snake_case_ )
            self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
            self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )

        def lowerCamelCase__ ( self ):
            # Repackage prepare_config_and_inputs() output as (config, inputs_dict)
            # for the common tests.
            _snake_case : Optional[Any] = self.prepare_config_and_inputs()
            (
                (
                    _snake_case
                ) , (
                    _snake_case
                ) , (
                    _snake_case
                ) , (
                    _snake_case
                ) , (
                    _snake_case
                ) , (
                    _snake_case
                ) , (
                    _snake_case
                ) ,
            ) : Tuple = config_and_inputs
            _snake_case : Tuple = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
            return config, inputs_dict

    def lowerCamelCase__ ( self ):
        # Shared fixtures: the nested helper tester plus a ConfigTester.
        _snake_case : int = TFMobileBertModelTest.TFMobileBertModelTester(self )
        _snake_case : Optional[Any] = ConfigTester(self , config_class=snake_case_ , hidden_size=37 )

    def lowerCamelCase__ ( self ):
        # Generic config serialization/round-trip checks.
        self.config_tester.run_common_tests()

    def lowerCamelCase__ ( self ):
        _snake_case : int = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_model(*snake_case_ )

    def lowerCamelCase__ ( self ):
        _snake_case : List[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_masked_lm(*snake_case_ )

    def lowerCamelCase__ ( self ):
        _snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_multiple_choice(*snake_case_ )

    def lowerCamelCase__ ( self ):
        _snake_case : Tuple = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*snake_case_ )

    def lowerCamelCase__ ( self ):
        _snake_case : str = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_pretraining(*snake_case_ )

    def lowerCamelCase__ ( self ):
        _snake_case : Dict = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_question_answering(*snake_case_ )

    def lowerCamelCase__ ( self ):
        _snake_case : Any = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_sequence_classification(*snake_case_ )

    def lowerCamelCase__ ( self ):
        _snake_case : Any = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_token_classification(*snake_case_ )

    @slow
    def lowerCamelCase__ ( self ):
        # Smoke-test loading a released checkpoint from the hub.
        # for model_name in TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["google/mobilebert-uncased"]:
            _snake_case : str = TFMobileBertModel.from_pretrained(snake_case_ )
            self.assertIsNotNone(snake_case_ )
@require_tf
class _UpperCAmelCase ( unittest.TestCase):
    """Integration test: run the released MobileBERT checkpoint end to end."""

    @slow
    def lowerCamelCase__ ( self ):
        # Load the pretrained checkpoint and score a short token sequence.
        _snake_case : Any = TFMobileBertForPreTraining.from_pretrained("google/mobilebert-uncased" )
        _snake_case : str = tf.constant([[0, 1, 2, 3, 4, 5]] )
        _snake_case : Union[str, Any] = model(snake_case_ )[0]
        # Expected prediction-logit shape: (batch=1, seq_len=6, vocab=30522).
        _snake_case : int = [1, 6, 3_05_22]
        self.assertEqual(output.shape , snake_case_ )
        # Reference logits recorded from a known-good run; compared at 1e-4 tolerance.
        _snake_case : Optional[Any] = tf.constant(
            [
                [
                    [-4.5919547, -9.248295, -9.645256],
                    [-6.7306175, -6.440284, -6.6052837],
                    [-7.2743506, -6.7847915, -6.024673],
                ]
            ] )
        tf.debugging.assert_near(output[:, :3, :3] , snake_case_ , atol=1E-4 )
| 87 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# NOTE(review): ``_a`` is immediately reassigned below, discarding the logger —
# presumably these were two distinct names (a module logger and a
# pretrained-config archive map) before the identifiers were mangled; confirm.
_a : str = logging.get_logger(__name__)
# Map from model identifier to the URL of its hosted config.json.
_a : Tuple = {
    """EleutherAI/gpt-neox-20b""": """https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json""",
    # See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}
class _UpperCAmelCase ( _snake_case):
    """Configuration class for GPT-NeoX models.

    Stores the model hyperparameters (vocabulary size, hidden dimensions,
    rotary-embedding settings, dropout rates, ...) and validates the optional
    ``rope_scaling`` dictionary.

    Fixes over the previous revision:
    - ``__init__`` declared every parameter with the same name (a SyntaxError)
      and assigned them to throwaway locals instead of ``self``; the parameter
      names and ``self.*`` assignments are restored, keeping the same
      positional order and default values.
    - the validation method is named ``_rope_scaling_validation`` to match the
      call site in ``__init__``.
    - error messages corrected: "divisible" typo, duplicated "with", and the
      ``rope_scaling`` messages now name the actual fields checked
      (``type``/``factor``, not ``name``).
    """

    __lowercase : int = """gpt_neox"""

    def __init__(
        self,
        vocab_size=5_04_32,
        hidden_size=61_44,
        num_hidden_layers=44,
        num_attention_heads=64,
        intermediate_size=2_45_76,
        hidden_act="gelu",
        rotary_pct=0.25,
        rotary_emb_base=1_00_00,
        attention_dropout=0.0,
        hidden_dropout=0.0,
        classifier_dropout=0.1,
        max_position_embeddings=20_48,
        initializer_range=0.02,
        layer_norm_eps=1E-5,
        use_cache=True,
        bos_token_id=0,
        eos_token_id=2,
        tie_word_embeddings=False,
        use_parallel_residual=True,
        rope_scaling=None,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
        self.classifier_dropout = classifier_dropout
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.use_parallel_residual = use_parallel_residual
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()
        # Each attention head must receive an integral slice of the hidden state.
        if self.hidden_size % self.num_attention_heads != 0:
            raise ValueError(
                "The hidden size is not divisible by the number of attention heads! Make sure to update them!" )

    def _rope_scaling_validation(self):
        """Validate ``self.rope_scaling``: either None or a dict with exactly
        the fields ``type`` (one of 'linear'/'dynamic') and ``factor`` (float > 1).

        Raises:
            ValueError: if the dictionary is malformed.
        """
        if self.rope_scaling is None:
            return
        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                F'got {self.rope_scaling}' )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                F'`rope_scaling`\'s type field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}' )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(F'`rope_scaling`\'s factor field must be a float > 1, got {rope_scaling_factor}' )
| 87 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
# Lazily-exposed import structure: maps submodule name -> names it exports.
# The previous revision passed an undefined name `_import_structure` to
# `_LazyModule` (NameError on import); it is now defined and populated.
_import_structure = {}

try:
    # BartphoTokenizer requires sentencepiece; skip registration when absent.
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_bartpho"] = ["BartphoTokenizer"]

if TYPE_CHECKING:
    # Static type checkers (and TYPE_CHECKING-time imports) see the real module.
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_bartpho import BartphoTokenizer
else:
    import sys

    # At runtime, defer all submodule imports through the lazy-module proxy.
    _a : Optional[int] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 87 | 1 |
"""simple docstring"""
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def a__ ( model ):
    """Return the number of trainable parameters of ``model``.

    Args:
        model: any object exposing ``parameters()`` that yields tensors with
            ``requires_grad`` and ``numel()`` (e.g. a ``torch.nn.Module``).

    Returns:
        int: total element count over parameters with ``requires_grad=True``.
    """
    # The previous body referenced the undefined names `model` (the parameter
    # was renamed `a`) and `p` (inside a lambda whose own parameter was unused),
    # raising NameError; count the trainable parameter sizes directly instead.
    return sum(p.numel() for p in model.parameters() if p.requires_grad)
# Module-level logger. NOTE(review): the callback below calls ``logger.info`` —
# this variable was presumably named ``logger`` before identifiers were mangled.
_a : Optional[int] = logging.getLogger(__name__)
def a__ ( output_dir , metric ):
    """Build a ``ModelCheckpoint`` that keeps the best checkpoints by ``val_<metric>``.

    The previous revision declared both parameters with the same name ``a``
    (a SyntaxError) while the body read the undefined names ``metric`` and
    ``output_dir``; the parameter names are restored to match the body.

    Args:
        output_dir: directory where checkpoint files are written.
        metric: validation metric to monitor; one of ``rouge2``, ``bleu``, ``em``.

    Returns:
        A configured ``pytorch_lightning.callbacks.ModelCheckpoint`` keeping the
        top 3 checkpoints (maximizing the metric), saved every epoch.

    Raises:
        NotImplementedError: for any other metric name.
    """
    # Filename template embeds the averaged validation metric and the step count.
    metric_to_filename = {
        "rouge2": "{val_avg_rouge2:.4f}-{step_count}",
        "bleu": "{val_avg_bleu:.4f}-{step_count}",
        "em": "{val_avg_em:.4f}-{step_count}",
    }
    if metric not in metric_to_filename:
        raise NotImplementedError(
            f'seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this'
            " function." )
    return ModelCheckpoint(
        dirpath=output_dir,
        filename=metric_to_filename[metric],
        monitor=f'val_{metric}',
        mode="max",
        save_top_k=3,
        every_n_epochs=1,
    )
def a__ ( metric , patience ):
    """Build an ``EarlyStopping`` callback watching ``val_<metric>``.

    The previous revision declared both parameters as ``a`` (a SyntaxError)
    while the body read the undefined name ``metric``; the names are restored.

    Args:
        metric: validation metric name; direction is ``min`` for loss-like
            metrics (name contains "loss") and ``max`` otherwise.
        patience: epochs without improvement before stopping.

    Returns:
        A configured ``pytorch_lightning.callbacks.EarlyStopping``.
    """
    # NOTE(review): the broken original forwarded one argument as both
    # `patience` and `verbose`; `verbose=True` is the presumed intent — confirm.
    return EarlyStopping(
        monitor=f'val_{metric}',
        mode="min" if "loss" in metric else "max",
        patience=patience,
        verbose=True,
    )
class _UpperCAmelCase ( pl.Callback):
    """Lightning callback that logs learning rates, metric files and generations.

    NOTE(review): names are obfuscated placeholders and several body names
    (``pl_module``, ``trainer``, ``metrics``, ``od``, ``val`` ...) do not match
    the mangled parameter/local names — kept byte-identical, only annotated.
    """

    def lowerCamelCase__ ( self , snake_case_ , snake_case_ ):
        # Log the current learning rate of every optimizer param group.
        _snake_case : Dict = {F'lr_group_{i}': param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )}
        pl_module.logger.log_metrics(snake_case_ )

    @rank_zero_only
    def lowerCamelCase__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_=True ):
        # Write callback metrics (and optionally generations) to per-split files
        # under the configured output directory.
        logger.info(F'***** {type_path} results at step {trainer.global_step:05d} *****' )
        _snake_case : Dict = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]} )
        # Log results
        _snake_case : List[Any] = Path(pl_module.hparams.output_dir )
        if type_path == "test":
            _snake_case : List[str] = od / "test_results.txt"
            _snake_case : Optional[int] = od / "test_generations.txt"
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            _snake_case : List[Any] = od / F'{type_path}_results/{trainer.global_step:05d}.txt'
            _snake_case : Dict = od / F'{type_path}_generations/{trainer.global_step:05d}.txt'
        results_file.parent.mkdir(exist_ok=snake_case_ )
        generations_file.parent.mkdir(exist_ok=snake_case_ )
        with open(snake_case_ , "a+" ) as writer:
            for key in sorted(snake_case_ ):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                _snake_case : List[str] = metrics[key]
                # Tensor metrics are unwrapped to plain Python scalars first.
                if isinstance(snake_case_ , torch.Tensor ):
                    _snake_case : Any = val.item()
                _snake_case : List[str] = F'{key}: {val:.6f}\n'
                writer.write(snake_case_ )
        if not save_generations:
            return
        if "preds" in metrics:
            _snake_case : Optional[Any] = "\n".join(metrics["preds"] )
            generations_file.open("w+" ).write(snake_case_ )

    @rank_zero_only
    def lowerCamelCase__ ( self , snake_case_ , snake_case_ ):
        # Log total/trainable parameter counts when training starts.
        try:
            _snake_case : Tuple = pl_module.model.model.num_parameters()
        except AttributeError:
            _snake_case : Tuple = pl_module.model.num_parameters()
        _snake_case : List[Any] = count_trainable_parameters(snake_case_ )
        # mp stands for million parameters
        trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1E6, "grad_mp": n_trainable_pars / 1E6} )

    @rank_zero_only
    def lowerCamelCase__ ( self , snake_case_ , snake_case_ ):
        # On test end: persist the metrics json and dump test results/generations.
        save_json(pl_module.metrics , pl_module.metrics_save_path )
        return self._write_logs(snake_case_ , snake_case_ , "test" )

    @rank_zero_only
    def lowerCamelCase__ ( self , snake_case_ , snake_case_ ):
        # On validation end: persist metrics only (no generation dump).
        save_json(pl_module.metrics , pl_module.metrics_save_path )
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")
| 87 |
"""simple docstring"""
def a__ ( list_data : list , key : int , left : int = 0 , right : int = 0 ):
    """Recursively search ``list_data`` for ``key``, closing in from both ends.

    Each call compares the leftmost and rightmost elements of the current
    window, then shrinks the window by one from each side, so every index is
    eventually inspected.  The list does not need to be sorted.

    :param list_data: the list to search
    :param key: the value to locate
    :param left: leftmost index of the current search window
    :param right: rightmost index of the window; 0 (the default) means "last index"
    :return: the index of ``key`` if found, otherwise -1
    """
    # Fix: the original signature named all four parameters ``a`` (duplicate
    # argument names are a SyntaxError) and recursed via an undefined name
    # ``search``; the names restored here are the ones the body already reads.
    right = right or len(list_data ) - 1  # treat default 0 as the last index
    if left > right:
        return -1
    elif list_data[left] == key:
        return left
    elif list_data[right] == key:
        return right
    else:
        return a__(list_data , key , left + 1 , right - 1 )
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest
    doctest.testmod()
| 87 | 1 |
"""simple docstring"""
from __future__ import annotations
class _UpperCAmelCase :
    """XOR stream cipher: symmetric encrypt/decrypt for char lists, strings and files.

    NOTE(review): this class looks machine-mangled.  All six helper methods
    share the name ``lowerCamelCase__`` so each shadows the previous one (only
    the last, file decryption, survives on the class); several ``def``s repeat
    the parameter name ``snake_case_`` (a SyntaxError); and ``__init__`` binds
    a throwaway local while the methods read ``self.__key`` (never assigned).
    The apparent original names are (encrypt, decrypt, encrypt_string,
    decrypt_string, encrypt_file, decrypt_file) — confirm before use.
    """
    def __init__( self , snake_case_ = 0 ):
        # presumably ``self.__key = key`` — as written this reads an undefined
        # name ``key`` and stores it in a local; TODO confirm
        _snake_case : Tuple = key
    def lowerCamelCase__ ( self , snake_case_ , snake_case_ ):
        # presumably ``encrypt(content, key)`` -> list of XOR-ed characters
        assert isinstance(snake_case_ , snake_case_ ) and isinstance(snake_case_ , snake_case_ )
        _snake_case : Dict = key or self.__key or 1
        # make sure key is an appropriate size
        key %= 2_55
        return [chr(ord(snake_case_ ) ^ key ) for ch in content]
    def lowerCamelCase__ ( self , snake_case_ , snake_case_ ):
        # presumably ``decrypt(content, key)`` — XOR is its own inverse
        assert isinstance(snake_case_ , snake_case_ ) and isinstance(snake_case_ , snake_case_ )
        _snake_case : Tuple = key or self.__key or 1
        # make sure key is an appropriate size
        key %= 2_55
        return [chr(ord(snake_case_ ) ^ key ) for ch in content]
    def lowerCamelCase__ ( self , snake_case_ , snake_case_ = 0 ):
        # presumably ``encrypt_string(content, key)`` -> str
        assert isinstance(snake_case_ , snake_case_ ) and isinstance(snake_case_ , snake_case_ )
        _snake_case : Optional[int] = key or self.__key or 1
        # make sure key can be any size
        while key > 2_55:
            key -= 2_55
        # This will be returned
        _snake_case : Optional[Any] = ""
        for ch in content:
            ans += chr(ord(snake_case_ ) ^ key )
        return ans
    def lowerCamelCase__ ( self , snake_case_ , snake_case_ = 0 ):
        # presumably ``decrypt_string(content, key)`` -> str
        assert isinstance(snake_case_ , snake_case_ ) and isinstance(snake_case_ , snake_case_ )
        _snake_case : int = key or self.__key or 1
        # make sure key can be any size
        while key > 2_55:
            key -= 2_55
        # This will be returned
        _snake_case : List[str] = ""
        for ch in content:
            ans += chr(ord(snake_case_ ) ^ key )
        return ans
    def lowerCamelCase__ ( self , snake_case_ , snake_case_ = 0 ):
        # presumably ``encrypt_file(file, key)`` -> bool success flag;
        # writes the encrypted stream to "encrypt.out"
        assert isinstance(snake_case_ , snake_case_ ) and isinstance(snake_case_ , snake_case_ )
        try:
            with open(snake_case_ ) as fin, open("encrypt.out" , "w+" ) as fout:
                # actual encrypt-process
                for line in fin:
                    fout.write(self.encrypt_string(snake_case_ , snake_case_ ) )
        except OSError:
            return False
        return True
    def lowerCamelCase__ ( self , snake_case_ , snake_case_ ):
        # presumably ``decrypt_file(file, key)`` -> bool; writes "decrypt.out"
        assert isinstance(snake_case_ , snake_case_ ) and isinstance(snake_case_ , snake_case_ )
        try:
            with open(snake_case_ ) as fin, open("decrypt.out" , "w+" ) as fout:
                # actual encrypt-process
                for line in fin:
                    fout.write(self.decrypt_string(snake_case_ , snake_case_ ) )
        except OSError:
            return False
        return True
# Tests
# crypt = XORCipher()
# key = 67
# # test encrypt
# print(crypt.encrypt("hallo welt",key))
# # test decrypt
# print(crypt.decrypt(crypt.encrypt("hallo welt",key), key))
# # test encrypt_string
# print(crypt.encrypt_string("hallo welt",key))
# # test decrypt_string
# print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key))
# if (crypt.encrypt_file("test.txt",key)):
# print("encrypt successful")
# else:
# print("encrypt unsuccessful")
# if (crypt.decrypt_file("encrypt.out",key)):
# print("decrypt successful")
# else:
# print("decrypt unsuccessful")
| 87 |
"""simple docstring"""
from __future__ import annotations
class _UpperCAmelCase :
    """Boyer-Moore substring search using the bad-character heuristic.

    NOTE(review): mangled identifiers — ``__init__`` repeats the parameter
    name ``snake_case_`` (SyntaxError), binds locals rather than the
    ``self.text`` / ``self.pattern`` / ``self.textLen`` / ``self.patLen``
    attributes the methods read, and the three methods all share the name
    ``lowerCamelCase__`` so the first two are shadowed.  Apparent originals:
    (match_in_pattern, mismatch_in_text, bad_character_heuristic) — confirm.
    """
    def __init__( self , snake_case_ , snake_case_ ):
        _snake_case , _snake_case : Dict = text, pattern
        _snake_case , _snake_case : int = len(snake_case_ ), len(snake_case_ )
    def lowerCamelCase__ ( self , snake_case_ ):
        # rightmost occurrence of `char` in the pattern, or -1 (bad-character table)
        for i in range(self.patLen - 1 , -1 , -1 ):
            if char == self.pattern[i]:
                return i
        return -1
    def lowerCamelCase__ ( self , snake_case_ ):
        # rightmost mismatching position of the pattern aligned at `current_pos`,
        # or -1 when the whole pattern matches
        for i in range(self.patLen - 1 , -1 , -1 ):
            if self.pattern[i] != self.text[current_pos + i]:
                return current_pos + i
        return -1
    def lowerCamelCase__ ( self ):
        # searches pattern in text and returns index positions
        _snake_case : List[str] = []
        for i in range(self.textLen - self.patLen + 1 ):
            _snake_case : Union[str, Any] = self.mismatch_in_text(snake_case_ )
            if mismatch_index == -1:
                positions.append(snake_case_ )
            else:
                _snake_case : Tuple = self.match_in_pattern(self.text[mismatch_index] )
                _snake_case : Tuple = (
                    mismatch_index - match_index
                )  # shifting index lgtm [py/multiple-definition]
        return positions
# Demo driver: search for `pattern` inside `text` and print all match positions.
# NOTE(review): every assignment below targets the throwaway name ``_a`` while
# later lines read ``text``, ``pattern``, ``bms`` and ``positions``, and the
# names ``BoyerMooreSearch`` / ``bad_character_heuristic`` are not defined in
# this file (the class above is ``_UpperCAmelCase`` with mangled method names) —
# this driver cannot run as written; restore the original bindings before use.
_a : List[Any] = """ABAABA"""
_a : str = """AB"""
_a : List[Any] = BoyerMooreSearch(text, pattern)
_a : Any = bms.bad_character_heuristic()
if len(positions) == 0:
    print("""No match found""")
else:
    print("""Pattern found in following positions: """)
    print(positions)
| 87 | 1 |
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module logger and the archive map of pretrained Pix2Struct config URLs.
_a : Dict = logging.get_logger(__name__)
_a : Tuple = {
    """google/pix2struct-textcaps-base""": (
        """https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json"""
    ),
}
class _UpperCAmelCase ( _snake_case):
    """Configuration for the Pix2Struct text decoder (T5-style hyper-parameters).

    NOTE(review): identifiers look machine-mangled — the base class is the name
    ``_snake_case``, every ``__init__`` parameter is ``snake_case_`` (duplicate
    argument names are a SyntaxError), and the body binds locals while reading
    ``vocab_size`` etc.; the right-hand-side names below appear to be the
    intended ``self.*`` attributes — confirm against the upstream config.
    """
    __lowercase : Optional[int] = """pix2struct_text_model"""
    __lowercase : Optional[Any] = ["""past_key_values"""]
    # maps the common HF attribute names onto this config's field names
    __lowercase : List[Any] = {
        """hidden_size""": """hidden_size""",
        """num_attention_heads""": """num_heads""",
        """num_hidden_layers""": """num_layers""",
    }
    def __init__( self , snake_case_=5_02_44 , snake_case_=7_68 , snake_case_=64 , snake_case_=20_48 , snake_case_=12 , snake_case_=12 , snake_case_=32 , snake_case_=1_28 , snake_case_=0.1 , snake_case_=1E-6 , snake_case_=1.0 , snake_case_="gelu_new" , snake_case_=0 , snake_case_=False , snake_case_=0 , snake_case_=1 , snake_case_=False , snake_case_=True , **snake_case_ , ):
        _snake_case : int = vocab_size
        _snake_case : List[str] = hidden_size
        _snake_case : int = d_kv
        _snake_case : Any = d_ff
        _snake_case : int = num_layers
        _snake_case : List[str] = num_heads
        _snake_case : Any = relative_attention_num_buckets
        _snake_case : Union[str, Any] = relative_attention_max_distance
        _snake_case : str = dropout_rate
        _snake_case : str = layer_norm_epsilon
        _snake_case : Optional[Any] = initializer_factor
        _snake_case : Any = use_cache
        _snake_case : Dict = eos_token_id
        _snake_case : Dict = decoder_start_token_id
        # for backwards compatibility
        _snake_case : List[str] = dense_act_fn
        super().__init__(
            pad_token_id=snake_case_ , eos_token_id=snake_case_ , decoder_start_token_id=snake_case_ , tie_word_embeddings=snake_case_ , is_decoder=snake_case_ , **snake_case_ , )
    @classmethod
    def lowerCamelCase__ ( cls , snake_case_ , **snake_case_ ):
        # presumably ``from_pretrained``: loads a config dict and unwraps the
        # nested "text_config" when given a composite pix2struct config
        cls._set_token_in_kwargs(snake_case_ )
        _snake_case , _snake_case : Dict = cls.get_config_dict(snake_case_ , **snake_case_ )
        # get the text config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type" ) == "pix2struct":
            _snake_case : int = config_dict["text_config"]
        if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
        return cls.from_dict(snake_case_ , **snake_case_ )
class _UpperCAmelCase ( _snake_case):
    """Configuration for the Pix2Struct vision encoder.

    NOTE(review): same mangling as the text config above — every ``__init__``
    parameter is named ``snake_case_`` (SyntaxError) and the body binds
    throwaway locals while reading the original names; the right-hand sides
    appear to be the intended ``self.*`` attributes — confirm.
    """
    __lowercase : List[str] = """pix2struct_vision_model"""
    def __init__( self , snake_case_=7_68 , snake_case_=7_68 , snake_case_=20_48 , snake_case_=64 , snake_case_=12 , snake_case_=12 , snake_case_="gelu_new" , snake_case_=1E-6 , snake_case_=0.0 , snake_case_=0.0 , snake_case_=1E-10 , snake_case_=1.0 , snake_case_=40_96 , snake_case_=32 , snake_case_=1_28 , **snake_case_ , ):
        super().__init__(**snake_case_ )
        _snake_case : Optional[Any] = hidden_size
        _snake_case : Dict = patch_embed_hidden_size
        _snake_case : Tuple = d_ff
        _snake_case : Tuple = dropout_rate
        _snake_case : int = num_hidden_layers
        _snake_case : Optional[int] = num_attention_heads
        _snake_case : Any = initializer_range
        _snake_case : List[str] = initializer_factor
        _snake_case : Tuple = attention_dropout
        _snake_case : Optional[int] = layer_norm_eps
        _snake_case : int = dense_act_fn
        _snake_case : int = seq_len
        _snake_case : Optional[Any] = relative_attention_num_buckets
        _snake_case : str = relative_attention_max_distance
        _snake_case : List[Any] = d_kv
    @classmethod
    def lowerCamelCase__ ( cls , snake_case_ , **snake_case_ ):
        # presumably ``from_pretrained``: unwraps the nested "vision_config"
        # when loading from a composite pix2struct config
        cls._set_token_in_kwargs(snake_case_ )
        _snake_case , _snake_case : str = cls.get_config_dict(snake_case_ , **snake_case_ )
        # get the vision config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type" ) == "pix2struct":
            _snake_case : Optional[Any] = config_dict["vision_config"]
        if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
        return cls.from_dict(snake_case_ , **snake_case_ )
class _UpperCAmelCase ( _snake_case):
    """Composite Pix2Struct configuration wrapping a text and a vision sub-config.

    NOTE(review): mangled identifiers — ``PixaStructTextConfig`` /
    ``PixaStructVisionConfig`` are referenced but the classes above are both
    named ``_UpperCAmelCase``; ``__init__`` binds locals while later code reads
    ``self.text_config`` / ``self.vision_config``; the ``from_...`` classmethod
    repeats its parameter names (SyntaxError).  Confirm against upstream.
    """
    __lowercase : List[Any] = """pix2struct"""
    __lowercase : str = True
    def __init__( self , snake_case_=None , snake_case_=None , snake_case_=1.0 , snake_case_=0.02 , snake_case_=False , snake_case_=False , snake_case_=True , **snake_case_ , ):
        super().__init__(tie_word_embeddings=snake_case_ , is_encoder_decoder=snake_case_ , **snake_case_ )
        # fall back to default sub-configs when none are supplied
        if text_config is None:
            _snake_case : List[str] = {}
            logger.info("text_config is None. Initializing the Pix2StructTextConfig with default values." )
        if vision_config is None:
            _snake_case : Optional[Any] = {}
            logger.info("vision_config is None. Initializing the Pix2StructVisionConfig with default values." )
        _snake_case : Tuple = PixaStructTextConfig(**snake_case_ )
        _snake_case : Optional[int] = PixaStructVisionConfig(**snake_case_ )
        # mirror special-token ids from the text sub-config
        _snake_case : List[str] = self.text_config.decoder_start_token_id
        _snake_case : List[str] = self.text_config.pad_token_id
        _snake_case : Dict = self.text_config.eos_token_id
        _snake_case : Union[str, Any] = initializer_factor
        _snake_case : str = initializer_range
        _snake_case : Any = self.initializer_range
        _snake_case : Optional[int] = self.initializer_range
        _snake_case : List[str] = is_vqa
    @classmethod
    def lowerCamelCase__ ( cls , snake_case_ , snake_case_ , **snake_case_ ):
        # presumably ``from_text_vision_configs``: build from two sub-configs
        return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **snake_case_ )
    def lowerCamelCase__ ( self ):
        # presumably ``to_dict``: serialize, expanding both sub-configs
        _snake_case : List[str] = copy.deepcopy(self.__dict__ )
        _snake_case : str = self.text_config.to_dict()
        _snake_case : Optional[int] = self.vision_config.to_dict()
        _snake_case : Optional[int] = self.__class__.model_type
        return output
| 87 |
"""simple docstring"""
from datetime import datetime
import requests
from bsa import BeautifulSoup
if __name__ == "__main__":
    # Download the og:image of a web page and save it with a timestamped name.
    # Fix: the original bound every value to the throwaway name ``_a`` while
    # the following lines read ``url``, ``soup``, ``image_url``, ``image_data``
    # and ``file_name`` — a NameError on the very first print.  The bindings
    # below are the names the reads require; behavior is otherwise unchanged.
    url = input("Enter image url: ").strip()
    print(f'Downloading image from {url} ...')
    soup = BeautifulSoup(requests.get(url).content, "html.parser")
    # The image URL is in the content field of the first meta tag with property og:image
    image_url = soup.find("meta", {"property": "og:image"})["content"]
    image_data = requests.get(image_url).content
    file_name = f'{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg'
    with open(file_name, "wb") as fp:
        fp.write(image_data)
    print(f'Done. Image saved to disk as {file_name}.')
| 87 | 1 |
"""simple docstring"""
import numpy as np
import torch
import tqdm
from ...models.unet_ad import UNetaDModel
from ...pipelines import DiffusionPipeline
from ...utils import randn_tensor
from ...utils.dummy_pt_objects import DDPMScheduler
class _UpperCAmelCase ( _snake_case):
    """Value-guided planning pipeline: denoises candidate trajectories with a
    UNet while nudging each diffusion step along the gradient of a learned
    value function, then returns the first action of the best trajectory.

    NOTE(review): identifiers look machine-mangled — the five helper methods
    below all share the name ``lowerCamelCase__`` (each shadows the previous),
    yet later code calls ``self.normalize`` / ``self.de_normalize`` /
    ``self.to_torch`` / ``self.reset_xa`` / ``self.run_diffusion``; many
    assignments bind ``_snake_case`` while the original names are read back.
    The apparent original method names are noted per method — confirm.
    """
    def __init__( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , ):
        # presumably (value_function, unet, scheduler, env) — all four params
        # share one name here, which is a SyntaxError; names inferred from reads
        super().__init__()
        _snake_case : str = value_function
        _snake_case : str = unet
        _snake_case : List[str] = scheduler
        _snake_case : Optional[Any] = env
        _snake_case : List[Any] = env.get_dataset()
        _snake_case : Union[str, Any] = {}
        # per-key dataset means (consumed by normalize / de_normalize)
        for key in self.data.keys():
            try:
                _snake_case : int = self.data[key].mean()
            except: # noqa: E722
                pass
        _snake_case : Optional[Any] = {}
        # per-key dataset standard deviations
        for key in self.data.keys():
            try:
                _snake_case : str = self.data[key].std()
            except: # noqa: E722
                pass
        _snake_case : Any = env.observation_space.shape[0]
        _snake_case : str = env.action_space.shape[0]
    def lowerCamelCase__ ( self , snake_case_ , snake_case_ ):
        # presumably ``normalize(x_in, key)``: z-score using dataset statistics
        return (x_in - self.means[key]) / self.stds[key]
    def lowerCamelCase__ ( self , snake_case_ , snake_case_ ):
        # presumably ``de_normalize(x_in, key)``: invert the z-score
        return x_in * self.stds[key] + self.means[key]
    def lowerCamelCase__ ( self , snake_case_ ):
        # presumably ``to_torch(x_in)``: recursively move data onto the UNet device
        if type(snake_case_ ) is dict:
            return {k: self.to_torch(snake_case_ ) for k, v in x_in.items()}
        elif torch.is_tensor(snake_case_ ):
            return x_in.to(self.unet.device )
        return torch.tensor(snake_case_ , device=self.unet.device )
    def lowerCamelCase__ ( self , snake_case_ , snake_case_ , snake_case_ ):
        # presumably ``reset_x0(x_in, cond, act_dim)``: re-impose the known
        # conditioning states on the trajectory after each denoising step
        for key, val in cond.items():
            _snake_case : Optional[int] = val.clone()
        return x_in
    def lowerCamelCase__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ ):
        # presumably ``run_diffusion(x, conditions, n_guide_steps, scale)``
        _snake_case : Optional[int] = x.shape[0]
        _snake_case : Optional[Any] = None
        for i in tqdm.tqdm(self.scheduler.timesteps ):
            # create batch of timesteps to pass into model
            _snake_case : Union[str, Any] = torch.full((batch_size,) , snake_case_ , device=self.unet.device , dtype=torch.long )
            for _ in range(snake_case_ ):
                with torch.enable_grad():
                    x.requires_grad_()
                    # permute to match dimension for pre-trained models
                    _snake_case : Optional[Any] = self.value_function(x.permute(0 , 2 , 1 ) , snake_case_ ).sample
                    _snake_case : Tuple = torch.autograd.grad([y.sum()] , [x] )[0]
                    _snake_case : Optional[int] = self.scheduler._get_variance(snake_case_ )
                    _snake_case : Dict = torch.exp(0.5 * posterior_variance )
                    _snake_case : int = model_std * grad
                _snake_case : List[Any] = 0
                _snake_case : int = x.detach()
                # gradient-ascent step on the value estimate
                _snake_case : int = x + scale * grad
                _snake_case : Dict = self.reset_xa(snake_case_ , snake_case_ , self.action_dim )
            _snake_case : List[Any] = self.unet(x.permute(0 , 2 , 1 ) , snake_case_ ).sample.permute(0 , 2 , 1 )
            # TODO: verify deprecation of this kwarg
            _snake_case : List[str] = self.scheduler.step(snake_case_ , snake_case_ , snake_case_ , predict_epsilon=snake_case_ )["prev_sample"]
            # apply conditions to the trajectory (set the initial state)
            _snake_case : Union[str, Any] = self.reset_xa(snake_case_ , snake_case_ , self.action_dim )
            _snake_case : int = self.to_torch(snake_case_ )
        return x, y
    def __call__( self , snake_case_ , snake_case_=64 , snake_case_=32 , snake_case_=2 , snake_case_=0.1 ):
        # presumably (obs, batch_size, planning_horizon, n_guide_steps, scale)
        # normalize the observations and create  batch dimension
        _snake_case : Dict = self.normalize(snake_case_ , "observations" )
        _snake_case : Union[str, Any] = obs[None].repeat(snake_case_ , axis=0 )
        _snake_case : Dict = {0: self.to_torch(snake_case_ )}
        _snake_case : Optional[int] = (batch_size, planning_horizon, self.state_dim + self.action_dim)
        # generate initial noise and apply our conditions (to make the trajectories start at current state)
        _snake_case : List[str] = randn_tensor(snake_case_ , device=self.unet.device )
        _snake_case : Optional[int] = self.reset_xa(snake_case_ , snake_case_ , self.action_dim )
        _snake_case : Optional[int] = self.to_torch(snake_case_ )
        # run the diffusion process
        _snake_case , _snake_case : Optional[int] = self.run_diffusion(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
        # sort output trajectories by value
        _snake_case : Union[str, Any] = y.argsort(0 , descending=snake_case_ ).squeeze()
        _snake_case : List[str] = x[sorted_idx]
        _snake_case : Union[str, Any] = sorted_values[:, :, : self.action_dim]
        _snake_case : Union[str, Any] = actions.detach().cpu().numpy()
        _snake_case : List[Any] = self.de_normalize(snake_case_ , key="actions" )
        # select the action with the highest value
        if y is not None:
            _snake_case : Dict = 0
        else:
            # if we didn't run value guiding, select a random action
            _snake_case : Union[str, Any] = np.random.randint(0 , snake_case_ )
        _snake_case : List[Any] = denorm_actions[selected_index, 0]
        return denorm_actions
| 87 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
# Lazy import structure: public submodules/symbols are declared up front and
# only materialized on first attribute access via _LazyModule, keeping the
# package import cheap when torch/vision are unavailable.
# NOTE(review): mangled names — the assignments bind ``_a`` while _LazyModule
# receives ``_import_structure``, and the TYPE_CHECKING imports use
# ``PixaStruct*`` / ``configuration_pixastruct`` where the declared strings say
# ``Pix2Struct*`` / ``configuration_pix2struct`` — confirm before use.
_a : Optional[int] = {
    """configuration_pix2struct""": [
        """PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
        """Pix2StructConfig""",
        """Pix2StructTextConfig""",
        """Pix2StructVisionConfig""",
    ],
    """processing_pix2struct""": ["""Pix2StructProcessor"""],
}
# image processor requires the vision extra
try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _a : List[Any] = ["""Pix2StructImageProcessor"""]
# modeling classes require torch
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _a : Dict = [
        """PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """Pix2StructPreTrainedModel""",
        """Pix2StructForConditionalGeneration""",
        """Pix2StructVisionModel""",
        """Pix2StructTextModel""",
    ]
# static type checkers see the real imports; runtime gets the lazy module
if TYPE_CHECKING:
    from .configuration_pixastruct import (
        PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        PixaStructConfig,
        PixaStructTextConfig,
        PixaStructVisionConfig,
    )
    from .processing_pixastruct import PixaStructProcessor
    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_pixastruct import PixaStructImageProcessor
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_pixastruct import (
            PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST,
            PixaStructForConditionalGeneration,
            PixaStructPreTrainedModel,
            PixaStructTextModel,
            PixaStructVisionModel,
        )
else:
    import sys
    _a : Optional[int] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 87 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
# Lazy import structure for the M2M-100 package: symbols are declared here and
# materialized on first access via _LazyModule.
# NOTE(review): mangled names — assignments bind ``_a`` while _LazyModule
# receives ``_import_structure``, and the TYPE_CHECKING imports use
# ``MaMaaa*`` / ``*_mam_aaa`` module names where the declared strings say
# ``M2M100*`` / ``*_m2m_100`` — confirm before use.
_a : str = {
    """configuration_m2m_100""": ["""M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP""", """M2M100Config""", """M2M100OnnxConfig"""],
    """tokenization_m2m_100""": ["""M2M100Tokenizer"""],
}
# modeling classes require torch
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _a : int = [
        """M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """M2M100ForConditionalGeneration""",
        """M2M100Model""",
        """M2M100PreTrainedModel""",
    ]
# static type checkers see the real imports; runtime gets the lazy module
if TYPE_CHECKING:
    from .configuration_mam_aaa import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, MaMaaaConfig, MaMaaaOnnxConfig
    from .tokenization_mam_aaa import MaMaaaTokenizer
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mam_aaa import (
            M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
            MaMaaaForConditionalGeneration,
            MaMaaaModel,
            MaMaaaPreTrainedModel,
        )
else:
    import sys
    _a : Any = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 87 |
"""simple docstring"""
import argparse
import json
import subprocess
def a__ ( target_runners , token ):
    """Query the GitHub API for self-hosted runner status and fail on offline ones.

    Runs ``curl`` against the transformers actions-runners endpoint, records
    any targeted runner that reports ``offline`` into ``offline_runners.txt``
    (so the result can be reported on Slack), and raises if any were offline.

    Fix: the original repeated the parameter name ``a`` (duplicate argument
    names are a SyntaxError), annotated with ``Optional``/``Any`` which are
    never imported, and bound every local to ``_snake_case`` while reading the
    real names back — the names restored here are the ones the body reads.

    :param target_runners: collection of runner names to check
    :param token: GitHub token with ``actions:read`` permission
    :raises ValueError: if any targeted runner is offline
    """
    offline_runners = []
    cmd = (
        f'curl -H "Accept: application/vnd.github+json" -H "Authorization: Bearer {token}"'
        " https://api.github.com/repos/huggingface/transformers/actions/runners"
    )
    # shell=True because the command is a single pre-formatted string
    output = subprocess.run(cmd , shell=True , stdout=subprocess.PIPE )
    o = output.stdout.decode("utf-8" )
    status = json.loads(o )
    runners = status["runners"]
    for runner in runners:
        if runner["name"] in target_runners:
            if runner["status"] == "offline":
                offline_runners.append(runner )
    # save the result so we can report them on Slack
    with open("offline_runners.txt" , "w" ) as fp:
        fp.write(json.dumps(offline_runners ) )
    if len(offline_runners ) > 0:
        failed = "\n".join([x["name"] for x in offline_runners] )
        raise ValueError(f'The following runners are offline:\n{failed}' )
if __name__ == "__main__":
    def a__ ( a : Optional[int] ):
        """Parse a comma-separated CLI value into a list of strings.

        NOTE(review): the body reads ``values`` but the parameter is ``a``
        (NameError when called), and argparse below references ``list_str``
        while this function is named ``a__`` — both look like mangled names.
        """
        return values.split("," )
    _a : Optional[int] = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--target_runners""",
        default=None,
        type=list_str,
        required=True,
        help="""Comma-separated list of runners to check status.""",
    )
    parser.add_argument(
        """--token""", default=None, type=str, required=True, help="""A token that has actions:read permission."""
    )
    _a : List[str] = parser.parse_args()
    # NOTE(review): the assignments above bind ``_a`` yet ``parser``/``args``
    # are read back, and ``get_runner_status`` is not defined in this file
    # (the checker above is named ``a__``) — restore original names before use.
    get_runner_status(args.target_runners, args.token)
| 87 | 1 |
"""simple docstring"""
import argparse
import torch
from transformers import (
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaForAudioFrameClassification,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
logging,
)
# Emit INFO-level progress messages during checkpoint conversion.
logging.set_verbosity_info()
_a : Optional[int] = logging.get_logger(__name__)
def a__ ( a : List[str] , a : Dict , a : List[str] ):
    """Build a Wav2Vec2 sequence-classification model and copy s3prl weights in.

    NOTE(review): the three parameters all share the name ``a`` (duplicate
    argument names are a SyntaxError; presumably base_model_name, config,
    downstream_dict), and the four weight lines bind throwaway locals instead
    of the model attributes they were clearly meant to set (projector /
    post_net.linear) — the original assignment targets were lost; confirm
    against the upstream conversion script.  ``model`` is also read but never
    bound as written.
    """
    _snake_case : Tuple = WavaVecaForSequenceClassification.from_pretrained(a , config=a )
    _snake_case : Tuple = downstream_dict["projector.weight"]
    _snake_case : int = downstream_dict["projector.bias"]
    _snake_case : int = downstream_dict["model.post_net.linear.weight"]
    _snake_case : Union[str, Any] = downstream_dict["model.post_net.linear.bias"]
    return model
def a__ ( a : int , a : str , a : Tuple ):
    """Build a Wav2Vec2 audio-frame-classification (diarization) model from s3prl.

    NOTE(review): duplicate ``a`` parameter names (SyntaxError) and mangled
    assignment targets as in the function above — the linear weights were
    presumably copied onto the model's classifier; ``model`` is read but never
    bound as written.  Confirm against the upstream script.
    """
    _snake_case : Any = WavaVecaForAudioFrameClassification.from_pretrained(a , config=a )
    _snake_case : int = downstream_dict["model.linear.weight"]
    _snake_case : List[str] = downstream_dict["model.linear.bias"]
    return model
def a__ ( a : str , a : Union[str, Any] , a : Dict ):
    """Build a Wav2Vec2 x-vector (speaker verification) model from an s3prl dict.

    NOTE(review): duplicate ``a`` parameter names (SyntaxError) and mangled
    assignment targets — the TDNN kernels, utterance-level linears and the
    objective weight were presumably copied onto the model; ``model`` is read
    but never bound as written.  Confirm against the upstream script.
    """
    _snake_case : Any = WavaVecaForXVector.from_pretrained(a , config=a )
    _snake_case : int = downstream_dict["connector.weight"]
    _snake_case : Dict = downstream_dict["connector.bias"]
    # one TDNN layer per configured kernel size
    for i, kernel_size in enumerate(hf_config.tdnn_kernel ):
        _snake_case : Optional[Any] = downstream_dict[
            f'model.framelevel_feature_extractor.module.{i}.kernel.weight'
        ]
        _snake_case : Union[str, Any] = downstream_dict[f'model.framelevel_feature_extractor.module.{i}.kernel.bias']
    _snake_case : List[Any] = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
    _snake_case : List[Any] = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
    _snake_case : List[Any] = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
    _snake_case : Dict = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
    _snake_case : Dict = downstream_dict["objective.W"]
    return model
@torch.no_grad()
def a__ ( a : List[Any] , a : List[Any] , a : List[str] , a : int ):
    """Convert an s3prl checkpoint into a HF Wav2Vec2 head model and save it.

    NOTE(review): the four parameters all share the name ``a`` (SyntaxError;
    presumably base_model_name, config_path, checkpoint_path, model_dump_path),
    and locals bind ``_snake_case`` while ``checkpoint``, ``downstream_dict``,
    ``hf_config``, ``hf_feature_extractor``, ``arch`` and ``hf_model`` are read
    back — restore names before use.  The three ``convert_*`` helpers called
    here are also not defined under those names in this file.
    """
    _snake_case : Union[str, Any] = torch.load(a , map_location="cpu" )
    _snake_case : List[str] = checkpoint["Downstream"]
    _snake_case : Any = WavaVecaConfig.from_pretrained(a )
    _snake_case : Dict = WavaVecaFeatureExtractor.from_pretrained(
        a , return_attention_mask=a , do_normalize=a )
    # dispatch on the head architecture declared in the config
    _snake_case : Optional[Any] = hf_config.architectures[0]
    if arch.endswith("ForSequenceClassification" ):
        _snake_case : Any = convert_classification(a , a , a )
    elif arch.endswith("ForAudioFrameClassification" ):
        _snake_case : List[Any] = convert_diarization(a , a , a )
    elif arch.endswith("ForXVector" ):
        _snake_case : str = convert_xvector(a , a , a )
    else:
        raise NotImplementedError(f'S3PRL weights conversion is not supported for {arch}' )
    if hf_config.use_weighted_layer_sum:
        _snake_case : str = checkpoint["Featurizer"]["weights"]
    hf_feature_extractor.save_pretrained(a )
    hf_model.save_pretrained(a )
if __name__ == "__main__":
    # CLI entry point for the s3prl -> HF conversion.
    # NOTE(review): the parser is bound to ``_a`` yet ``parser``/``args`` are
    # read back, and ``convert_saprl_checkpoint`` is not defined in this file
    # (the converter above is named ``a__``) — restore names before use.
    _a : List[str] = argparse.ArgumentParser()
    parser.add_argument(
        """--base_model_name""", default=None, type=str, help="""Name of the huggingface pretrained base model."""
    )
    parser.add_argument("""--config_path""", default=None, type=str, help="""Path to the huggingface classifier config.""")
    parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to the s3prl checkpoint.""")
    parser.add_argument("""--model_dump_path""", default=None, type=str, help="""Path to the final converted model.""")
    _a : int = parser.parse_args()
    convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
| 87 |
"""simple docstring"""
import unittest
from .lib import (
Matrix,
Vector,
axpy,
square_zero_matrix,
unit_basis_vector,
zero_vector,
)
class _UpperCAmelCase ( unittest.TestCase):
    """Unit tests for the local linear-algebra ``lib`` (Vector / Matrix helpers).

    NOTE(review): every test method is named ``lowerCamelCase__`` — they shadow
    one another and none matches unittest's ``test_*`` discovery pattern, so as
    written no test would run; the original names were presumably
    ``test_component``, ``test_str``, etc.  ``assertEqual(7, a.component(2, 1),
    0.01)`` near the end also looks like it was meant to be
    ``assertAlmostEqual`` — confirm.
    """
    def lowerCamelCase__ ( self ):
        # component access; the trailing bare ``Vector()`` looks like a leftover
        _snake_case : List[Any] = Vector([1, 2, 3] )
        self.assertEqual(x.component(0 ) , 1 )
        self.assertEqual(x.component(2 ) , 3 )
        _snake_case : List[Any] = Vector()
    def lowerCamelCase__ ( self ):
        # string representation
        _snake_case : Any = Vector([0, 0, 0, 0, 0, 1] )
        self.assertEqual(str(snake_case_ ) , "(0,0,0,0,0,1)" )
    def lowerCamelCase__ ( self ):
        # vector length
        _snake_case : Dict = Vector([1, 2, 3, 4] )
        self.assertEqual(len(snake_case_ ) , 4 )
    def lowerCamelCase__ ( self ):
        # Euclidean norm
        _snake_case : List[Any] = Vector([1, 2] )
        _snake_case : List[str] = Vector([1, 2, 3, 4, 5] )
        _snake_case : List[Any] = Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0] )
        _snake_case : Any = Vector([1, -1, 1, -1, 2, -3, 4, -5] )
        self.assertAlmostEqual(x.euclidean_length() , 2.236 , 3 )
        self.assertAlmostEqual(y.euclidean_length() , 7.416 , 3 )
        self.assertEqual(z.euclidean_length() , 0 )
        self.assertAlmostEqual(w.euclidean_length() , 7.616 , 3 )
    def lowerCamelCase__ ( self ):
        # vector addition
        _snake_case : List[Any] = Vector([1, 2, 3] )
        _snake_case : Any = Vector([1, 1, 1] )
        self.assertEqual((x + y).component(0 ) , 2 )
        self.assertEqual((x + y).component(1 ) , 3 )
        self.assertEqual((x + y).component(2 ) , 4 )
    def lowerCamelCase__ ( self ):
        # vector subtraction
        _snake_case : str = Vector([1, 2, 3] )
        _snake_case : Union[str, Any] = Vector([1, 1, 1] )
        self.assertEqual((x - y).component(0 ) , 0 )
        self.assertEqual((x - y).component(1 ) , 1 )
        self.assertEqual((x - y).component(2 ) , 2 )
    def lowerCamelCase__ ( self ):
        # scalar multiplication and dot product
        _snake_case : Optional[int] = Vector([1, 2, 3] )
        _snake_case : List[Any] = Vector([2, -1, 4] )  # for test of dot product
        _snake_case : Union[str, Any] = Vector([1, -2, -1] )
        self.assertEqual(str(x * 3.0 ) , "(3.0,6.0,9.0)" )
        self.assertEqual((a * b) , 0 )
    def lowerCamelCase__ ( self ):
        # zero-vector factory
        self.assertEqual(str(zero_vector(10 ) ).count("0" ) , 10 )
    def lowerCamelCase__ ( self ):
        # unit basis vector factory
        self.assertEqual(str(unit_basis_vector(3 , 1 ) ) , "(0,1,0)" )
    def lowerCamelCase__ ( self ):
        # axpy: a*x + y
        _snake_case : Tuple = Vector([1, 2, 3] )
        _snake_case : Optional[Any] = Vector([1, 0, 1] )
        self.assertEqual(str(axpy(2 , snake_case_ , snake_case_ ) ) , "(3,4,7)" )
    def lowerCamelCase__ ( self ):
        # copy produces an equal vector
        _snake_case : Union[str, Any] = Vector([1, 0, 0, 0, 0, 0] )
        _snake_case : Optional[int] = x.copy()
        self.assertEqual(str(snake_case_ ) , str(snake_case_ ) )
    def lowerCamelCase__ ( self ):
        # in-place component mutation
        _snake_case : Dict = Vector([1, 0, 0] )
        x.change_component(0 , 0 )
        x.change_component(1 , 1 )
        self.assertEqual(str(snake_case_ ) , "(0,1,0)" )
    def lowerCamelCase__ ( self ):
        # matrix string representation
        _snake_case : str = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
        self.assertEqual("|1,2,3|\n|2,4,5|\n|6,7,8|\n" , str(snake_case_ ) )
    def lowerCamelCase__ ( self ):
        # matrix minors
        _snake_case : Dict = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
        _snake_case : str = [[-3, -14, -10], [-5, -10, -5], [-2, -1, 0]]
        for x in range(a.height() ):
            for y in range(a.width() ):
                self.assertEqual(minors[x][y] , a.minor(snake_case_ , snake_case_ ) )
    def lowerCamelCase__ ( self ):
        # matrix cofactors
        _snake_case : Union[str, Any] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
        _snake_case : Optional[Any] = [[-3, 14, -10], [5, -10, 5], [-2, 1, 0]]
        for x in range(a.height() ):
            for y in range(a.width() ):
                self.assertEqual(cofactors[x][y] , a.cofactor(snake_case_ , snake_case_ ) )
    def lowerCamelCase__ ( self ):
        # determinant
        _snake_case : int = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
        self.assertEqual(-5 , a.determinant() )
    def lowerCamelCase__ ( self ):
        # matrix * vector and matrix * scalar
        _snake_case : str = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]] , 3 , 3 )
        _snake_case : List[str] = Vector([1, 2, 3] )
        self.assertEqual("(14,32,50)" , str(a * x ) )
        self.assertEqual("|2,4,6|\n|8,10,12|\n|14,16,18|\n" , str(a * 2 ) )
    def lowerCamelCase__ ( self ):
        # in-place matrix component mutation
        _snake_case : Optional[int] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
        a.change_component(0 , 2 , 5 )
        self.assertEqual("|1,2,5|\n|2,4,5|\n|6,7,8|\n" , str(snake_case_ ) )
    def lowerCamelCase__ ( self ):
        # component read; third arg 0.01 is treated by assertEqual as a message
        _snake_case : List[Any] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
        self.assertEqual(7 , a.component(2 , 1 ) , 0.01 )
    def lowerCamelCase__ ( self ):
        # matrix addition
        _snake_case : Tuple = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
        _snake_case : int = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 )
        self.assertEqual("|2,4,10|\n|4,8,10|\n|12,14,18|\n" , str(a + b ) )
    def lowerCamelCase__ ( self ):
        # matrix subtraction
        _snake_case : int = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
        _snake_case : Optional[Any] = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 )
        self.assertEqual("|0,0,-4|\n|0,0,0|\n|0,0,-2|\n" , str(a - b ) )
    def lowerCamelCase__ ( self ):
        # zero-matrix factory
        self.assertEqual(
            "|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n" , str(square_zero_matrix(5 ) ) , )
if __name__ == "__main__":
    # Run the test suite when executed as a script.
    unittest.main()
| 87 | 1 |
"""simple docstring"""
import json
import os
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ImageGPTImageProcessor
class _UpperCAmelCase ( unittest.TestCase):
    """Parameter holder used to configure the ImageGPT image-processor tests.

    NOTE(review): ``__init__`` repeats the parameter name ``snake_case_``
    (duplicate argument names are a SyntaxError) and binds every value to a
    throwaway local instead of the ``self.*`` attributes that
    ``prepare_image_processor_dict`` reads — the right-hand-side names below
    appear to be the intended attributes; confirm.
    """
    def __init__( self , snake_case_ , snake_case_=7 , snake_case_=3 , snake_case_=18 , snake_case_=30 , snake_case_=4_00 , snake_case_=True , snake_case_=None , snake_case_=True , ):
        _snake_case : Optional[Any] = size if size is not None else {"height": 18, "width": 18}
        _snake_case : Optional[int] = parent
        _snake_case : Dict = batch_size
        _snake_case : Optional[Any] = num_channels
        _snake_case : List[str] = image_size
        _snake_case : Optional[int] = min_resolution
        _snake_case : str = max_resolution
        _snake_case : Dict = do_resize
        _snake_case : Optional[int] = size
        _snake_case : List[str] = do_normalize
    def lowerCamelCase__ ( self ):
        # presumably ``prepare_image_processor_dict``: kwargs for the processor
        return {
            # here we create 2 clusters for the sake of simplicity
            "clusters": np.asarray(
                [
                    [0.8866443634033203, 0.6618829369544983, 0.3891746401786804],
                    [-0.6042559146881104, -0.02295008860528469, 0.5423797369003296],
                ] ),
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
        }
@require_torch
@require_vision
class _UpperCAmelCase ( _snake_case , unittest.TestCase):
    """Tests for ImageGPTImageProcessor: properties, dict/json round-trips,
    and save/load of a pretrained processor.

    NOTE(review): all test methods share the name ``lowerCamelCase__`` —
    each shadows the previous and none matches unittest's ``test_*``
    discovery, so as written none of these would run; the first one is
    presumably ``setUp`` (it builds ``self.image_processor_tester``) but it
    binds a throwaway local instead.  Confirm against the original file.
    """
    __lowercase : List[Any] = ImageGPTImageProcessor if is_vision_available() else None
    def lowerCamelCase__ ( self ):
        # presumably ``setUp``
        _snake_case : List[str] = ImageGPTImageProcessingTester(self )
    @property
    def lowerCamelCase__ ( self ):
        # presumably ``image_processor_dict``
        return self.image_processor_tester.prepare_image_processor_dict()
    def lowerCamelCase__ ( self ):
        # processor exposes the expected attributes
        _snake_case : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(snake_case_ , "clusters" ) )
        self.assertTrue(hasattr(snake_case_ , "do_resize" ) )
        self.assertTrue(hasattr(snake_case_ , "size" ) )
        self.assertTrue(hasattr(snake_case_ , "do_normalize" ) )
    def lowerCamelCase__ ( self ):
        # from_dict honors defaults and keyword overrides
        _snake_case : List[Any] = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {"height": 18, "width": 18} )
        _snake_case : Union[str, Any] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
        self.assertEqual(image_processor.size , {"height": 42, "width": 42} )
    def lowerCamelCase__ ( self ):
        # to_json_string round-trip (clusters compared as arrays)
        _snake_case : Dict = self.image_processing_class(**self.image_processor_dict )
        _snake_case : Dict = json.loads(image_processor.to_json_string() )
        for key, value in self.image_processor_dict.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(snake_case_ , obj[key] ) )
            else:
                self.assertEqual(obj[key] , snake_case_ )
    def lowerCamelCase__ ( self ):
        # to_json_file / from_json_file round-trip
        _snake_case : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            _snake_case : List[Any] = os.path.join(snake_case_ , "image_processor.json" )
            image_processor_first.to_json_file(snake_case_ )
            _snake_case : str = self.image_processing_class.from_json_file(snake_case_ ).to_dict()
        _snake_case : Optional[int] = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(snake_case_ , image_processor_second[key] ) )
            else:
                self.assertEqual(image_processor_first[key] , snake_case_ )
    def lowerCamelCase__ ( self ):
        # save_pretrained / from_pretrained round-trip
        _snake_case : Tuple = self.image_processing_class(**self.image_processor_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            image_processor_first.save_pretrained(snake_case_ )
            _snake_case : Any = self.image_processing_class.from_pretrained(snake_case_ ).to_dict()
        _snake_case : List[str] = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(snake_case_ , image_processor_second[key] ) )
            else:
                self.assertEqual(image_processor_first[key] , snake_case_ )
    @unittest.skip("ImageGPT requires clusters at initialization" )
    def lowerCamelCase__ ( self ):
        pass
def a__ ( ):
    """Load the two fixture images used by the slow ImageGPT integration test.

    Fixes the mangled original, which assigned both images to `_snake_case`,
    referenced an undefined `imagea`, and returned an unbound `images`.
    """
    dataset = load_dataset("hf-internal-testing/fixtures_image_utils" , split="test" )

    image1 = Image.open(dataset[4]["file"] )
    image2 = Image.open(dataset[5]["file"] )

    images = [image1, image2]
    return images
@require_vision
@require_torch
class _UpperCAmelCase ( unittest.TestCase):
    """Slow integration test: checks ImageGPT token ids produced for real images.

    Restores coherent local names; the mangled original assigned results to
    `_snake_case` but read `image_processing`/`images`/`expected_ids`.
    """

    @slow
    def test_image(self):
        image_processing = ImageGPTImageProcessor.from_pretrained("openai/imagegpt-small" )
        images = prepare_images()

        # test non-batched
        encoding = image_processing(images[0] , return_tensors="pt" )
        self.assertIsInstance(encoding.input_ids , torch.LongTensor )
        self.assertEqual(encoding.input_ids.shape , (1, 10_24) )
        expected_ids = [3_06, 1_91, 1_91]
        self.assertEqual(encoding.input_ids[0, :3].tolist() , expected_ids )

        # test batched
        encoding = image_processing(images , return_tensors="pt" )
        self.assertIsInstance(encoding.input_ids , torch.LongTensor )
        self.assertEqual(encoding.input_ids.shape , (2, 10_24) )
        expected_ids = [3_03, 13, 13]
        self.assertEqual(encoding.input_ids[1, -3:].tolist() , expected_ids )
| 87 |
"""simple docstring"""
from __future__ import annotations
from collections import namedtuple
def a__ ( voltage : float , current : float , power : float ):
    """Solve for the missing quantity in the electrical power law P = V * I.

    Exactly one of the three arguments must be 0; that quantity is computed
    from the other two and returned as a ``result(name, value)`` namedtuple.

    Fixes the mangled original signature, which declared three parameters all
    named ``a`` (a SyntaxError) while the body read voltage/current/power.

    Raises:
        ValueError: if zero or more than one argument is 0, or if power < 0.
    """
    result = namedtuple("result" , "name value" )
    if (voltage, current, power).count(0 ) != 1:
        raise ValueError("Only one argument must be 0" )
    elif power < 0:
        raise ValueError(
            "Power cannot be negative in any electrical/electronics system" )
    elif voltage == 0:
        return result("voltage" , power / current )
    elif current == 0:
        return result("current" , power / voltage )
    elif power == 0:
        # round to 2 decimals, mirroring typical metering precision
        return result("power" , float(round(abs(voltage * current ) , 2 ) ) )
    else:
        raise ValueError("Exactly one argument must be 0" )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 87 | 1 |
"""simple docstring"""
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def a__ ( tmpdir ):
    """A second FileLock on the same path must time out while the first is held.

    Fixes the mangled original: both locks were assigned to `_snake_case` but
    used as `locka`, the timeout local was unbound, and the parameter must be
    named ``tmpdir`` for pytest fixture injection to work.
    """
    lock1 = FileLock(str(tmpdir / "foo.lock" ) )
    lock2 = FileLock(str(tmpdir / "foo.lock" ) )
    timeout = 0.01
    with lock1.acquire():
        with pytest.raises(Timeout ):
            _start = time.time()
            lock2.acquire(timeout )
        # the failed acquire must have waited at least `timeout` seconds
        assert time.time() - _start > timeout
def a__ ( tmpdir ):
    """Lock filenames longer than the OS limit are shortened to a valid path.

    Fixes the mangled original: locks were assigned to `_snake_case` but used
    as `locka`, `filename` was unbound, `pytest.raises` received the tmpdir
    argument instead of `Timeout`, and the parameter must be named ``tmpdir``
    for pytest fixture injection.
    """
    filename = "a" * 1_000 + ".lock"
    lock1 = FileLock(str(tmpdir / filename ) )
    assert lock1._lock_file.endswith(".lock" )
    assert not lock1._lock_file.endswith(filename )
    # 255 is the usual max filename length on common filesystems
    assert len(os.path.basename(lock1._lock_file ) ) <= 255
    lock2 = FileLock(tmpdir / filename )
    with lock1.acquire():
        with pytest.raises(Timeout ):
            lock2.acquire(0 )
| 87 |
"""simple docstring"""
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DPMSolverMultistepScheduler,
TextToVideoSDPipeline,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class _UpperCAmelCase ( _snake_case , unittest.TestCase):
    """Fast tests for TextToVideoSDPipeline built from tiny dummy components.

    NOTE(review): throughout this class, results are assigned to a local
    `_snake_case` but read back under other names (`components`, `inputs`,
    `sd_pipe`, `frames`, ...) — this looks like mangled codegen and would
    raise NameError at runtime; compare with the upstream diffusers test
    before relying on it.
    """

    __lowercase : Any = TextToVideoSDPipeline
    __lowercase : str = TEXT_TO_IMAGE_PARAMS
    __lowercase : int = TEXT_TO_IMAGE_BATCH_PARAMS
    # No `output_type`.
    __lowercase : Optional[int] = frozenset(
        [
            """num_inference_steps""",
            """generator""",
            """latents""",
            """return_dict""",
            """callback""",
            """callback_steps""",
        ])

    def lowerCamelCase__ ( self ):
        # Build the tiny UNet / scheduler / VAE / text encoder with fixed seeds
        # so the pipeline output is deterministic.
        torch.manual_seed(0 )
        _snake_case : str = UNetaDConditionModel(
            block_out_channels=(32, 64, 64, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D") , up_block_types=("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D") , cross_attention_dim=32 , attention_head_dim=4 , )
        _snake_case : List[Any] = DDIMScheduler(
            beta_start=0.00085 , beta_end=0.012 , beta_schedule="scaled_linear" , clip_sample=snake_case_ , set_alpha_to_one=snake_case_ , )
        torch.manual_seed(0 )
        _snake_case : Union[str, Any] = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , sample_size=1_28 , )
        torch.manual_seed(0 )
        _snake_case : Optional[Any] = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act="gelu" , projection_dim=5_12 , )
        _snake_case : Tuple = CLIPTextModel(snake_case_ )
        _snake_case : Optional[int] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
        _snake_case : Any = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }
        return components

    def lowerCamelCase__ ( self , snake_case_ , snake_case_=0 ):
        # Build deterministic call kwargs; MPS needs a CPU-seeded generator.
        if str(snake_case_ ).startswith("mps" ):
            _snake_case : str = torch.manual_seed(snake_case_ )
        else:
            _snake_case : Union[str, Any] = torch.Generator(device=snake_case_ ).manual_seed(snake_case_ )
        _snake_case : str = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "pt",
        }
        return inputs

    def lowerCamelCase__ ( self ):
        # End-to-end smoke test: run 2 inference steps and compare a pixel slice.
        _snake_case : int = "cpu" # ensure determinism for the device-dependent torch.Generator
        _snake_case : Optional[Any] = self.get_dummy_components()
        _snake_case : Tuple = TextToVideoSDPipeline(**snake_case_ )
        _snake_case : List[str] = sd_pipe.to(snake_case_ )
        sd_pipe.set_progress_bar_config(disable=snake_case_ )
        _snake_case : int = self.get_dummy_inputs(snake_case_ )
        _snake_case : Union[str, Any] = "np"
        _snake_case : Dict = sd_pipe(**snake_case_ ).frames
        _snake_case : Any = frames[0][-3:, -3:, -1]
        assert frames[0].shape == (64, 64, 3)
        _snake_case : Dict = np.array([158.0, 160.0, 153.0, 125.0, 100.0, 121.0, 111.0, 93.0, 113.0] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2

    def lowerCamelCase__ ( self ):
        self._test_attention_slicing_forward_pass(test_mean_pixel_difference=snake_case_ , expected_max_diff=3E-3 )

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
    def lowerCamelCase__ ( self ):
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=snake_case_ , expected_max_diff=1E-2 )

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline." )
    def lowerCamelCase__ ( self ):
        pass

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline." )
    def lowerCamelCase__ ( self ):
        pass

    @unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline." )
    def lowerCamelCase__ ( self ):
        pass

    def lowerCamelCase__ ( self ):
        return super().test_progress_bar()
@slow
@skip_mps
class _UpperCAmelCase ( unittest.TestCase):
    """Slow integration tests: full text-to-video generation on CUDA compared
    against reference numpy videos.

    NOTE(review): as elsewhere in this file, results are bound to a local
    `_snake_case` but read back as `pipe`/`video_frames`/`video` — mangled
    codegen; compare with the upstream diffusers test before use.
    """

    def lowerCamelCase__ ( self ):
        # 25-step generation, checked against a stored reference video.
        _snake_case : List[Any] = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy" )
        _snake_case : int = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b" )
        _snake_case : str = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
        _snake_case : Tuple = pipe.to("cuda" )
        _snake_case : List[Any] = "Spiderman is surfing"
        _snake_case : Optional[int] = torch.Generator(device="cpu" ).manual_seed(0 )
        _snake_case : int = pipe(snake_case_ , generator=snake_case_ , num_inference_steps=25 , output_type="pt" ).frames
        _snake_case : int = video_frames.cpu().numpy()
        assert np.abs(expected_video - video ).mean() < 5E-2

    def lowerCamelCase__ ( self ):
        # Cheaper 2-step variant of the same comparison.
        _snake_case : Any = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy" )
        _snake_case : str = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b" )
        _snake_case : int = pipe.to("cuda" )
        _snake_case : Any = "Spiderman is surfing"
        _snake_case : str = torch.Generator(device="cpu" ).manual_seed(0 )
        _snake_case : Any = pipe(snake_case_ , generator=snake_case_ , num_inference_steps=2 , output_type="pt" ).frames
        _snake_case : Optional[int] = video_frames.cpu().numpy()
        assert np.abs(expected_video - video ).mean() < 5E-2
| 87 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_a : Any = logging.get_logger(__name__)
_a : Dict = {
"""google/mobilenet_v1_1.0_224""": """https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json""",
"""google/mobilenet_v1_0.75_192""": """https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json""",
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
}
class _UpperCAmelCase ( PretrainedConfig):
    """Configuration for MobileNetV1 models.

    Restores the upstream parameter names: the mangled original declared nine
    ``__init__`` parameters all named ``snake_case_`` (a SyntaxError) while the
    body read the real names below; the base class and ``model_type`` attribute
    are restored likewise.
    """

    model_type = """mobilenet_v1"""

    def __init__(
        self,
        num_channels=3,
        image_size=2_24,
        depth_multiplier=1.0,
        min_depth=8,
        hidden_act="relu6",
        tf_padding=True,
        classifier_dropout_prob=0.999,
        initializer_range=0.02,
        layer_norm_eps=0.001,
        **kwargs,
    ):
        super().__init__(**kwargs )
        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero." )

        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.min_depth = min_depth
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
class _UpperCAmelCase ( OnnxConfig):
    """ONNX export configuration for MobileNetV1.

    Restores upstream member names: in the mangled original all three
    properties were named ``lowerCamelCase__``, so only the last definition
    survived and the ONNX exporter could not find ``inputs``/``outputs``.
    """

    torch_onnx_minimum_version = version.parse("1.11" )

    @property
    def inputs(self):
        # Only the batch axis (0) is dynamic.
        return OrderedDict([("pixel_values", {0: "batch"})] )

    @property
    def outputs(self):
        if self.task == "image-classification":
            return OrderedDict([("logits", {0: "batch"})] )
        else:
            return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})] )

    @property
    def atol_for_validation(self):
        # Absolute tolerance used when validating the exported model.
        return 1E-4
| 87 |
"""simple docstring"""
from typing import List, Optional
import numpy as np
from ...processing_utils import ProcessorMixin
from ...utils import to_numpy
class _UpperCAmelCase ( ProcessorMixin):
    """Processor wrapping an EncodecFeatureExtractor and a T5 tokenizer into a
    single object (audio in, token ids + input_values out).

    Restores upstream names: the mangled original declared ``*snake_case_,
    **snake_case_`` (duplicate parameter names → SyntaxError), bound every
    attribute to a local `_snake_case`, and gave all four methods the same name
    so only the last survived.
    """

    feature_extractor_class = """EncodecFeatureExtractor"""
    tokenizer_class = ("""T5Tokenizer""", """T5TokenizerFast""")

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer )
        # current_processor is swapped when entering a target-text context
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def get_decoder_prompt_ids(self, task=None, language=None, no_timestamps=True):
        return self.tokenizer.get_decoder_prompt_ids(task=task , language=language , no_timestamps=no_timestamps )

    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args , **kwargs )

        audio = kwargs.pop("audio" , None )
        sampling_rate = kwargs.pop("sampling_rate" , None )
        text = kwargs.pop("text" , None )
        if len(args ) > 0:
            # first positional argument is treated as the audio input
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process." )

        if text is not None:
            inputs = self.tokenizer(text , **kwargs )
        if audio is not None:
            audio_inputs = self.feature_extractor(audio , *args , sampling_rate=sampling_rate , **kwargs )

        if audio is None:
            return inputs
        elif text is None:
            return audio_inputs
        else:
            # merge the audio features into the tokenizer output
            inputs["input_values"] = audio_inputs["input_values"]
            if "padding_mask" in audio_inputs:
                inputs["padding_mask"] = audio_inputs["padding_mask"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        """Decode either generated audio values (when given) or token ids."""
        audio_values = kwargs.pop("audio" , None )
        padding_mask = kwargs.pop("padding_mask" , None )
        if len(args ) > 0:
            audio_values = args[0]
            args = args[1:]
        if audio_values is not None:
            return self._decode_audio(audio_values , padding_mask=padding_mask )
        else:
            return self.tokenizer.batch_decode(*args , **kwargs )

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args , **kwargs )

    def _decode_audio(self, audio_values, padding_mask = None ):
        """Strip padding from a (batch, channels, seq_len) array of audio values,
        returning a list of per-example arrays of shape (channels, unpadded_len)."""
        audio_values = to_numpy(audio_values )
        bsz , channels , seq_len = audio_values.shape

        if padding_mask is None:
            return list(audio_values )

        padding_mask = to_numpy(padding_mask )

        # match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding**
        # token (so that the generated audio values are **not** treated as padded tokens)
        difference = seq_len - padding_mask.shape[-1]
        padding_value = 1 - self.feature_extractor.padding_value
        padding_mask = np.pad(padding_mask , ((0, 0), (0, difference)) , "constant" , constant_values=padding_value )

        audio_values = audio_values.tolist()
        for i in range(bsz ):
            sliced_audio = np.asarray(audio_values[i] )[
                padding_mask[i][None, :] != self.feature_extractor.padding_value
            ]
            audio_values[i] = sliced_audio.reshape(channels , -1 )

        return audio_values
| 87 | 1 |
"""simple docstring"""
def a__ ( a : int ):
    """Return the ``a``-th ugly number.

    Ugly numbers are positive integers whose only prime factors are 2, 3 and 5;
    the sequence starts 1, 2, 3, 4, 5, 6, 8, 9, 10, 12, ...

    Fixes the mangled original, which collapsed the three pointer variables and
    all candidates into `_snake_case` and then read undefined names.
    """
    ugly_nums = [1]
    # pointers into ugly_nums for the next multiple of 2, 3 and 5
    i2 , i3 , i5 = 0, 0, 0
    next_2 = ugly_nums[i2] * 2
    next_3 = ugly_nums[i3] * 3
    next_5 = ugly_nums[i5] * 5

    for _ in range(1 , a ):
        next_num = min(next_2 , next_3 , next_5 )
        ugly_nums.append(next_num )
        # advance every pointer whose candidate was used (handles duplicates like 6)
        if next_num == next_2:
            i2 += 1
            next_2 = ugly_nums[i2] * 2
        if next_num == next_3:
            i3 += 1
            next_3 = ugly_nums[i3] * 3
        if next_num == next_5:
            i5 += 1
            next_5 = ugly_nums[i5] * 5
    return ugly_nums[-1]
if __name__ == "__main__":
from doctest import testmod
testmod(verbose=True)
print(f'{ugly_numbers(200) = }')
| 87 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
# Lazy-import scaffolding for the YOLOS model family: submodules are only
# imported when first accessed, and optional backends (vision / torch) are
# skipped when unavailable. Restores the upstream `_import_structure` wiring:
# the mangled original rebound every assignment to `_a`, so `_LazyModule`
# received an undefined `_import_structure` name.
_import_structure = {"configuration_yolos": ["YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP", "YolosConfig", "YolosOnnxConfig"]}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_yolos"] = ["YolosFeatureExtractor"]
    _import_structure["image_processing_yolos"] = ["YolosImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_yolos"] = [
        "YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST",
        "YolosForObjectDetection",
        "YolosModel",
        "YolosPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_yolos import YolosFeatureExtractor
        from .image_processing_yolos import YolosImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_yolos import (
            YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
            YolosForObjectDetection,
            YolosModel,
            YolosPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy that resolves names on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 87 | 1 |
"""simple docstring"""
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
# Restored upstream names: the class below reads the module globals `logger`
# and `DEVICE_MAPPING`, but the mangled original bound both values to `_a`.
logger = get_logger()

# Lazily-built mapping from device string identifiers to jaxlib Device objects
# (kept global because Device objects are not picklable).
DEVICE_MAPPING: Optional[dict] = None
class _UpperCAmelCase ( TensorFormatter[Mapping, """jax.Array""", Mapping]):
    """Formatter converting Arrow-backed rows/columns/batches into JAX arrays
    placed on a chosen device.

    NOTE(review): several assignments bind a local `_snake_case` while later
    code reads other names (`self.device`, `default_dtype`, ...) — looks like
    mangled codegen; compare with `datasets` JaxFormatter before relying on it.
    The method names `lowerCamelCase__` also collide; upstream names are noted
    per method below (inferred from call sites — confirm upstream).
    """

    def __init__( self , snake_case_=None , snake_case_=None , **snake_case_ ):
        super().__init__(features=snake_case_ )
        import jax
        from jaxlib.xla_client import Device

        if isinstance(snake_case_ , snake_case_ ):
            raise ValueError(
                F'Expected {device} to be a `str` not {type(snake_case_ )}, as `jaxlib.xla_extension.Device` '
                "is not serializable neither with `pickle` nor with `dill`. Instead you can surround "
                "the device with `str()` to get its string identifier that will be internally mapped "
                "to the actual `jaxlib.xla_extension.Device`." )
        _snake_case : Union[str, Any] = device if isinstance(snake_case_ , snake_case_ ) else str(jax.devices()[0] )
        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            _snake_case : Tuple = self._map_devices_to_str()
        if self.device not in list(DEVICE_MAPPING.keys() ):
            logger.warning(
                F'Device with string identifier {self.device} not listed among the available '
                F'devices: {list(DEVICE_MAPPING.keys() )}, so falling back to the default '
                F'device: {str(jax.devices()[0] )}.' )
            _snake_case : Dict = str(jax.devices()[0] )
        _snake_case : List[Any] = jnp_array_kwargs

    @staticmethod
    def lowerCamelCase__ ( ):
        # upstream: _map_devices_to_str — maps each JAX device to its string id
        import jax

        return {str(snake_case_ ): device for device in jax.devices()}

    def lowerCamelCase__ ( self , snake_case_ ):
        # upstream: _consolidate — stacks equal-shape/dtype jax arrays in a column
        import jax
        import jax.numpy as jnp

        if isinstance(snake_case_ , snake_case_ ) and column:
            if all(
                isinstance(snake_case_ , jax.Array ) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column ):
                return jnp.stack(snake_case_ , axis=0 )
        return column

    def lowerCamelCase__ ( self , snake_case_ ):
        # upstream: _tensorize — converts a single leaf value into a jnp array
        import jax
        import jax.numpy as jnp

        if isinstance(snake_case_ , (str, bytes, type(snake_case_ )) ):
            return value
        elif isinstance(snake_case_ , (np.character, np.ndarray) ) and np.issubdtype(value.dtype , np.character ):
            return value.tolist()
        _snake_case : Union[str, Any] = {}
        if isinstance(snake_case_ , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.integer ):
            # the default int precision depends on the jax config
            # see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
            if jax.config.jax_enable_xaa:
                _snake_case : Optional[int] = {"dtype": jnp.intaa}
            else:
                _snake_case : Union[str, Any] = {"dtype": jnp.intaa}
        elif isinstance(snake_case_ , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.floating ):
            _snake_case : List[str] = {"dtype": jnp.floataa}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(snake_case_ , PIL.Image.Image ):
                _snake_case : Optional[Any] = np.asarray(snake_case_ )
        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            _snake_case : str = self._map_devices_to_str()
        with jax.default_device(DEVICE_MAPPING[self.device] ):
            # calling jnp.array on a np.ndarray does copy the data
            # see https://github.com/google/jax/issues/4486
            return jnp.array(snake_case_ , **{**default_dtype, **self.jnp_array_kwargs} )

    def lowerCamelCase__ ( self , snake_case_ ):
        # upstream: _recursive_tensorize — walks nested structures, converting leaves
        import jax

        # support for torch, tf, jax etc.
        if config.TORCH_AVAILABLE and "torch" in sys.modules:
            import torch

            if isinstance(snake_case_ , torch.Tensor ):
                return self._tensorize(data_struct.detach().cpu().numpy()[()] )
        if hasattr(snake_case_ , "__array__" ) and not isinstance(snake_case_ , jax.Array ):
            _snake_case : Tuple = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(snake_case_ , np.ndarray ):
            if data_struct.dtype == object: # jax arrays cannot be instantied from an array of objects
                return self._consolidate([self.recursive_tensorize(snake_case_ ) for substruct in data_struct] )
        elif isinstance(snake_case_ , (list, tuple) ):
            return self._consolidate([self.recursive_tensorize(snake_case_ ) for substruct in data_struct] )
        return self._tensorize(snake_case_ )

    def lowerCamelCase__ ( self , snake_case_ ):
        # upstream: recursive_tensorize — public entry point over map_nested
        return map_nested(self._recursive_tensorize , snake_case_ , map_list=snake_case_ )

    def lowerCamelCase__ ( self , snake_case_ ):
        # upstream: format_row
        _snake_case : List[str] = self.numpy_arrow_extractor().extract_row(snake_case_ )
        _snake_case : Optional[int] = self.python_features_decoder.decode_row(snake_case_ )
        return self.recursive_tensorize(snake_case_ )

    def lowerCamelCase__ ( self , snake_case_ ):
        # upstream: format_column
        _snake_case : Any = self.numpy_arrow_extractor().extract_column(snake_case_ )
        _snake_case : Any = self.python_features_decoder.decode_column(snake_case_ , pa_table.column_names[0] )
        _snake_case : Any = self.recursive_tensorize(snake_case_ )
        _snake_case : str = self._consolidate(snake_case_ )
        return column

    def lowerCamelCase__ ( self , snake_case_ ):
        # upstream: format_batch
        _snake_case : int = self.numpy_arrow_extractor().extract_batch(snake_case_ )
        _snake_case : Optional[int] = self.python_features_decoder.decode_batch(snake_case_ )
        _snake_case : Union[str, Any] = self.recursive_tensorize(snake_case_ )
        for column_name in batch:
            _snake_case : int = self._consolidate(batch[column_name] )
        return batch
| 87 |
"""simple docstring"""
import numpy as np
import torch
from torch.utils.data import Dataset, IterableDataset
from ..utils.generic import ModelOutput
class _UpperCAmelCase ( _snake_case):
def __init__( self , snake_case_ , snake_case_ , snake_case_ ):
_snake_case : Optional[int] = dataset
_snake_case : str = process
_snake_case : int = params
def __len__( self ):
return len(self.dataset )
def __getitem__( self , snake_case_ ):
_snake_case : Union[str, Any] = self.dataset[i]
_snake_case : Optional[Any] = self.process(snake_case_ , **self.params )
return processed
class _UpperCAmelCase ( _snake_case):
    """Iterator that runs `infer` over items pulled from `loader`, optionally
    un-batching model outputs back into per-item results of batch_size=1.

    NOTE(review): many locals are assigned to `_snake_case` but read back under
    other names (`result`, `processed`, `key`, ...) — looks like mangled
    codegen; compare with transformers `pipelines.pt_utils.PipelineIterator`
    before relying on this class.
    """

    def __init__( self , snake_case_ , snake_case_ , snake_case_ , snake_case_=None ):
        # Parameters mirror upstream: loader, infer, params, loader_batch_size.
        _snake_case : Union[str, Any] = loader
        _snake_case : Tuple = infer
        _snake_case : List[Any] = params
        if loader_batch_size == 1:
            # Let's spare some time by deactivating altogether
            _snake_case : int = None
        _snake_case : int = loader_batch_size
        # Internal bookkeeping
        _snake_case : Any = None
        _snake_case : Dict = None

    def __len__( self ):
        return len(self.loader )

    def __iter__( self ):
        _snake_case : int = iter(self.loader )
        return self

    def lowerCamelCase__ ( self ):
        # upstream: loader_batch_item — slice one item out of the current
        # loader batch and rebuild it so it looks like a batch of size 1.
        if isinstance(self._loader_batch_data , torch.Tensor ):
            # Batch data is simple tensor, just fetch the slice
            _snake_case : List[Any] = self._loader_batch_data[self._loader_batch_index]
        else:
            # Batch data is assumed to be BaseModelOutput (or dict)
            _snake_case : int = {}
            for k, element in self._loader_batch_data.items():
                if isinstance(snake_case_ , snake_case_ ):
                    # Convert ModelOutput to tuple first
                    _snake_case : Tuple = element.to_tuple()
                    if isinstance(element[0] , torch.Tensor ):
                        _snake_case : Union[str, Any] = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element )
                    elif isinstance(element[0] , np.ndarray ):
                        _snake_case : int = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element )
                    continue
                if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(snake_case_ , snake_case_ ):
                    # Those are stored as lists of tensors so need specific unbatching.
                    if isinstance(element[0] , torch.Tensor ):
                        _snake_case : Tuple = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element )
                    elif isinstance(element[0] , np.ndarray ):
                        _snake_case : Tuple = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element )
                    continue
                if element is None:
                    # This can happen for optional data that get passed around
                    _snake_case : Tuple = None
                elif isinstance(element[self._loader_batch_index] , torch.Tensor ):
                    # Take correct batch data, but make it looked like batch_size=1
                    # For compatibility with other methods within transformers
                    _snake_case : List[Any] = element[self._loader_batch_index].unsqueeze(0 )
                elif isinstance(element[self._loader_batch_index] , np.ndarray ):
                    # Take correct batch data, but make it looked like batch_size=1
                    # For compatibility with other methods within transformers
                    _snake_case : Union[str, Any] = np.expand_dims(element[self._loader_batch_index] , 0 )
                else:
                    # This is typically a list, so no need to `unsqueeze`.
                    _snake_case : List[Any] = element[self._loader_batch_index]
            # Recreate the element by reusing the original class to make it look
            # batch_size=1
            _snake_case : int = self._loader_batch_data.__class__(snake_case_ )
        self._loader_batch_index += 1
        return result

    def lowerCamelCase__ ( self ):
        # upstream: __next__ — either unroll the current batch or fetch + infer
        # a new one from the loader.
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            # We are currently unrolling a batch so we just need to return
            # the current item within a batch
            return self.loader_batch_item()
        # We're out of items within a batch
        _snake_case : Tuple = next(self.iterator )
        _snake_case : Any = self.infer(snake_case_ , **self.params )
        # We now have a batch of "inferred things".
        if self.loader_batch_size is not None:
            # Try to infer the size of the batch
            if isinstance(snake_case_ , torch.Tensor ):
                _snake_case : Union[str, Any] = processed
            else:
                _snake_case : Optional[int] = list(processed.keys() )[0]
                _snake_case : List[str] = processed[key]
                if isinstance(snake_case_ , snake_case_ ):
                    _snake_case : Dict = len(snake_case_ )
                else:
                    _snake_case : Optional[int] = first_tensor.shape[0]
            if 0 < observed_batch_size < self.loader_batch_size:
                # could be last batch so we can't unroll as many
                # elements.
                _snake_case : Union[str, Any] = observed_batch_size
            # Setting internal index to unwrap the batch
            _snake_case : str = processed
            _snake_case : List[Any] = 0
            return self.loader_batch_item()
        else:
            # We're not unrolling batches
            return processed
class _UpperCAmelCase ( _snake_case):
    """Iterator that flattens per-item sub-iterators produced by `infer` into
    one continuous stream (lists of lists -> single list, via generators).

    NOTE(review): `__iter__` binds locals `_snake_case` while the next-step
    method reads `self.iterator` / `self.subiterator` — looks like mangled
    codegen; compare with transformers `PipelineChunkIterator` before use.
    """

    def __init__( self , snake_case_ , snake_case_ , snake_case_ , snake_case_=None ):
        super().__init__(snake_case_ , snake_case_ , snake_case_ )

    def __iter__( self ):
        _snake_case : Tuple = iter(self.loader )
        _snake_case : List[Any] = None
        return self

    def lowerCamelCase__ ( self ):
        # upstream: __next__
        if self.subiterator is None:
            _snake_case : Optional[Any] = self.infer(next(self.iterator ) , **self.params )
        try:
            # Try to return next item
            _snake_case : Union[str, Any] = next(self.subiterator )
        except StopIteration:
            # When a preprocess iterator ends, we can start lookig at the next item
            # ChunkIterator will keep feeding until ALL elements of iterator
            # all have created their subiterator and have been iterating against.
            #
            # Another way to look at it, is we're basically flattening lists of lists
            # into a single list, but with generators
            _snake_case : str = self.infer(next(self.iterator ) , **self.params )
            _snake_case : Tuple = next(self.subiterator )
        return processed
class _UpperCAmelCase ( _snake_case):
    """Iterator that regroups flattened chunk results into per-item lists,
    accumulating until an `is_last` marker is seen.

    NOTE(review): locals are bound to `_snake_case` but read back as
    `accumulator`/`item`/`is_last`/`processed` — looks like mangled codegen;
    compare with transformers `PipelinePackIterator` before relying on it.
    """

    def __iter__( self ):
        _snake_case : Optional[Any] = iter(self.loader )
        return self

    def lowerCamelCase__ ( self ):
        # upstream: __next__
        # Extremely similar to PipelineIterator in its unpacking mechanism
        # BUT, we have an extra required item which is the presence of `is_last`
        # That is because everything is flattened by `PipelineChunkIterator` we
        # need to keep track of how to regroup here in the original `process`
        # boundaries so that `process` and `postprocess` see the same data.
        # This iterator accumulates items (possibly while unbatching) until it
        # its a `is_last` and then just passes it on to the caller.
        _snake_case : Optional[Any] = False
        _snake_case : Tuple = []
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            while self._loader_batch_index < self.loader_batch_size:
                _snake_case : Union[str, Any] = self.loader_batch_item()
                _snake_case : str = item.pop("is_last" )
                accumulator.append(snake_case_ )
                if is_last:
                    return accumulator
        while not is_last:
            _snake_case : List[str] = self.infer(next(self.iterator ) , **self.params )
            if self.loader_batch_size is not None:
                if isinstance(snake_case_ , torch.Tensor ):
                    _snake_case : Union[str, Any] = processed
                else:
                    _snake_case : Tuple = list(processed.keys() )[0]
                    _snake_case : Tuple = processed[key]
                    if isinstance(snake_case_ , snake_case_ ):
                        _snake_case : Any = len(snake_case_ )
                    else:
                        _snake_case : List[Any] = first_tensor.shape[0]
                if 0 < observed_batch_size < self.loader_batch_size:
                    # could be last batch so we can't unroll as many
                    # elements.
                    _snake_case : Dict = observed_batch_size
                _snake_case : List[Any] = processed
                _snake_case : List[str] = 0
                while self._loader_batch_index < self.loader_batch_size:
                    _snake_case : Union[str, Any] = self.loader_batch_item()
                    _snake_case : int = item.pop("is_last" )
                    accumulator.append(snake_case_ )
                    if is_last:
                        return accumulator
            else:
                _snake_case : Dict = processed
                _snake_case : Dict = item.pop("is_last" )
                accumulator.append(snake_case_ )
        return accumulator
class _UpperCAmelCase ( _snake_case):
def __init__( self , snake_case_ , snake_case_ ):
_snake_case : str = dataset
_snake_case : Any = key
def __len__( self ):
return len(self.dataset )
def __getitem__( self , snake_case_ ):
return self.dataset[i][self.key]
class _UpperCAmelCase ( _snake_case):
def __init__( self , snake_case_ , snake_case_ , snake_case_ ):
_snake_case : int = dataset
_snake_case : Any = keya
_snake_case : int = keya
def __len__( self ):
return len(self.dataset )
def __getitem__( self , snake_case_ ):
return {"text": self.dataset[i][self.keya], "text_pair": self.dataset[i][self.keya]}
| 87 | 1 |
"""simple docstring"""
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DPMSolverMultistepScheduler,
TextToVideoSDPipeline,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class _UpperCAmelCase ( _snake_case , unittest.TestCase):
__lowercase : Any = TextToVideoSDPipeline
__lowercase : str = TEXT_TO_IMAGE_PARAMS
__lowercase : int = TEXT_TO_IMAGE_BATCH_PARAMS
# No `output_type`.
__lowercase : Optional[int] = frozenset(
[
"""num_inference_steps""",
"""generator""",
"""latents""",
"""return_dict""",
"""callback""",
"""callback_steps""",
])
def lowerCamelCase__ ( self ):
torch.manual_seed(0 )
_snake_case : str = UNetaDConditionModel(
block_out_channels=(32, 64, 64, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D") , up_block_types=("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D") , cross_attention_dim=32 , attention_head_dim=4 , )
_snake_case : List[Any] = DDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule="scaled_linear" , clip_sample=snake_case_ , set_alpha_to_one=snake_case_ , )
torch.manual_seed(0 )
_snake_case : Union[str, Any] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , sample_size=1_28 , )
torch.manual_seed(0 )
_snake_case : Optional[Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act="gelu" , projection_dim=5_12 , )
_snake_case : Tuple = CLIPTextModel(snake_case_ )
_snake_case : Optional[int] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
_snake_case : Any = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
}
return components
def lowerCamelCase__ ( self , snake_case_ , snake_case_=0 ):
if str(snake_case_ ).startswith("mps" ):
_snake_case : str = torch.manual_seed(snake_case_ )
else:
_snake_case : Union[str, Any] = torch.Generator(device=snake_case_ ).manual_seed(snake_case_ )
_snake_case : str = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
"output_type": "pt",
}
return inputs
def lowerCamelCase__ ( self ):
_snake_case : int = "cpu" # ensure determinism for the device-dependent torch.Generator
_snake_case : Optional[Any] = self.get_dummy_components()
_snake_case : Tuple = TextToVideoSDPipeline(**snake_case_ )
_snake_case : List[str] = sd_pipe.to(snake_case_ )
sd_pipe.set_progress_bar_config(disable=snake_case_ )
_snake_case : int = self.get_dummy_inputs(snake_case_ )
_snake_case : Union[str, Any] = "np"
_snake_case : Dict = sd_pipe(**snake_case_ ).frames
_snake_case : Any = frames[0][-3:, -3:, -1]
assert frames[0].shape == (64, 64, 3)
_snake_case : Dict = np.array([158.0, 160.0, 153.0, 125.0, 100.0, 121.0, 111.0, 93.0, 113.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def lowerCamelCase__ ( self ):
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=snake_case_ , expected_max_diff=3E-3 )
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def lowerCamelCase__ ( self ):
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=snake_case_ , expected_max_diff=1E-2 )
@unittest.skip(reason="Batching needs to be properly figured out first for this pipeline." )
def lowerCamelCase__ ( self ):
pass
@unittest.skip(reason="Batching needs to be properly figured out first for this pipeline." )
def lowerCamelCase__ ( self ):
pass
@unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline." )
def lowerCamelCase__ ( self ):
pass
def lowerCamelCase__ ( self ):
return super().test_progress_bar()
@slow
@skip_mps
class _UpperCAmelCase ( unittest.TestCase):
def lowerCamelCase__ ( self ):
_snake_case : List[Any] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy" )
_snake_case : int = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b" )
_snake_case : str = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
_snake_case : Tuple = pipe.to("cuda" )
_snake_case : List[Any] = "Spiderman is surfing"
_snake_case : Optional[int] = torch.Generator(device="cpu" ).manual_seed(0 )
_snake_case : int = pipe(snake_case_ , generator=snake_case_ , num_inference_steps=25 , output_type="pt" ).frames
_snake_case : int = video_frames.cpu().numpy()
assert np.abs(expected_video - video ).mean() < 5E-2
def lowerCamelCase__ ( self ):
_snake_case : Any = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy" )
_snake_case : str = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b" )
_snake_case : int = pipe.to("cuda" )
_snake_case : Any = "Spiderman is surfing"
_snake_case : str = torch.Generator(device="cpu" ).manual_seed(0 )
_snake_case : Any = pipe(snake_case_ , generator=snake_case_ , num_inference_steps=2 , output_type="pt" ).frames
_snake_case : Optional[int] = video_frames.cpu().numpy()
assert np.abs(expected_video - video ).mean() < 5E-2
| 87 |
"""simple docstring"""
def a__ ( a : int ):
"""simple docstring"""
if not isinstance(a , a ):
raise TypeError("Input value must be an 'int' type" )
_snake_case : Union[str, Any] = 0
while number:
position += 1
number >>= 1
return position
if __name__ == "__main__":
import doctest
doctest.testmod()
| 87 | 1 |
"""simple docstring"""
def a__ ( a : Optional[int] , a : Any , a : Optional[int] , a : Optional[Any] ):
"""simple docstring"""
if height >= 1:
move_tower(height - 1 , a , a , a )
move_disk(a , a )
move_tower(height - 1 , a , a , a )
def a__ ( a : Optional[int] , a : Optional[int] ):
"""simple docstring"""
print("moving disk from" , a , "to" , a )
def a__ ( ):
"""simple docstring"""
_snake_case : Dict = int(input("Height of hanoi: " ).strip() )
move_tower(a , "A" , "B" , "C" )
if __name__ == "__main__":
main()
| 87 |
"""simple docstring"""
from __future__ import annotations
import requests
_a : List[str] = set(
"""approved_at_utc approved_by author_flair_background_color
author_flair_css_class author_flair_richtext author_flair_template_id author_fullname
author_premium can_mod_post category clicked content_categories created_utc downs
edited gilded gildings hidden hide_score is_created_from_ads_ui is_meta
is_original_content is_reddit_media_domain is_video link_flair_css_class
link_flair_richtext link_flair_text link_flair_text_color media_embed mod_reason_title
name permalink pwls quarantine saved score secure_media secure_media_embed selftext
subreddit subreddit_name_prefixed subreddit_type thumbnail title top_awarded_type
total_awards_received ups upvote_ratio url user_reports""".split()
)
def a__ ( a : str , a : int = 1 , a : str = "new" , a : list | None = None ):
"""simple docstring"""
_snake_case : Any = wanted_data or []
if invalid_search_terms := ", ".join(sorted(set(a ) - valid_terms ) ):
_snake_case : Optional[int] = f'Invalid search term: {invalid_search_terms}'
raise ValueError(a )
_snake_case : int = requests.get(
f'https://reddit.com/r/{subreddit}/{age}.json?limit={limit}' , headers={"User-agent": "A random string"} , )
if response.status_code == 429:
raise requests.HTTPError
_snake_case : Optional[Any] = response.json()
if not wanted_data:
return {id_: data["data"]["children"][id_] for id_ in range(a )}
_snake_case : Tuple = {}
for id_ in range(a ):
_snake_case : List[str] = {
item: data["data"]["children"][id_]["data"][item] for item in wanted_data
}
return data_dict
if __name__ == "__main__":
# If you get Error 429, that means you are rate limited.Try after some time
print(get_subreddit_data("""learnpython""", wanted_data=["""title""", """url""", """selftext"""]))
| 87 | 1 |
"""simple docstring"""
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class _UpperCAmelCase :
def __init__( self , snake_case_ , snake_case_=13 , snake_case_=7 , snake_case_=True , snake_case_=True , snake_case_=True , snake_case_=True , snake_case_=True , snake_case_=False , snake_case_=False , snake_case_=False , snake_case_=2 , snake_case_=99 , snake_case_=0 , snake_case_=32 , snake_case_=5 , snake_case_=4 , snake_case_=0.1 , snake_case_=0.1 , snake_case_=5_12 , snake_case_=2 , snake_case_=0.02 , snake_case_=2 , snake_case_=4 , snake_case_="last" , snake_case_=True , snake_case_=None , snake_case_=0 , ):
_snake_case : List[str] = parent
_snake_case : Tuple = batch_size
_snake_case : Union[str, Any] = seq_length
_snake_case : Dict = is_training
_snake_case : Dict = use_input_lengths
_snake_case : List[str] = use_token_type_ids
_snake_case : Dict = use_labels
_snake_case : int = gelu_activation
_snake_case : List[str] = sinusoidal_embeddings
_snake_case : Tuple = causal
_snake_case : Tuple = asm
_snake_case : List[Any] = n_langs
_snake_case : List[Any] = vocab_size
_snake_case : List[Any] = n_special
_snake_case : Dict = hidden_size
_snake_case : str = num_hidden_layers
_snake_case : Optional[Any] = num_attention_heads
_snake_case : Dict = hidden_dropout_prob
_snake_case : Optional[Any] = attention_probs_dropout_prob
_snake_case : str = max_position_embeddings
_snake_case : List[Any] = type_sequence_label_size
_snake_case : Optional[int] = initializer_range
_snake_case : Optional[Any] = num_labels
_snake_case : str = num_choices
_snake_case : Tuple = summary_type
_snake_case : Tuple = use_proj
_snake_case : List[Any] = scope
_snake_case : int = bos_token_id
def lowerCamelCase__ ( self ):
_snake_case : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_snake_case : List[str] = random_attention_mask([self.batch_size, self.seq_length] )
_snake_case : Union[str, Any] = None
if self.use_input_lengths:
_snake_case : int = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
_snake_case : Optional[int] = None
if self.use_token_type_ids:
_snake_case : int = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
_snake_case : Optional[int] = None
_snake_case : str = None
_snake_case : Optional[Any] = None
if self.use_labels:
_snake_case : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_snake_case : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_snake_case : Optional[Any] = ids_tensor([self.batch_size] , 2 ).float()
_snake_case : Optional[int] = ids_tensor([self.batch_size] , self.num_choices )
_snake_case : Optional[int] = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def lowerCamelCase__ ( self ):
return XLMConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , )
def lowerCamelCase__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , ):
_snake_case : Tuple = XLMModel(config=snake_case_ )
model.to(snake_case_ )
model.eval()
_snake_case : List[str] = model(snake_case_ , lengths=snake_case_ , langs=snake_case_ )
_snake_case : Optional[Any] = model(snake_case_ , langs=snake_case_ )
_snake_case : str = model(snake_case_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCamelCase__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , ):
_snake_case : Optional[Any] = XLMWithLMHeadModel(snake_case_ )
model.to(snake_case_ )
model.eval()
_snake_case : int = model(snake_case_ , token_type_ids=snake_case_ , labels=snake_case_ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCamelCase__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , ):
_snake_case : List[Any] = XLMForQuestionAnsweringSimple(snake_case_ )
model.to(snake_case_ )
model.eval()
_snake_case : Optional[int] = model(snake_case_ )
_snake_case : Tuple = model(snake_case_ , start_positions=snake_case_ , end_positions=snake_case_ )
_snake_case : Union[str, Any] = outputs
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCamelCase__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , ):
_snake_case : Union[str, Any] = XLMForQuestionAnswering(snake_case_ )
model.to(snake_case_ )
model.eval()
_snake_case : int = model(snake_case_ )
_snake_case : List[Any] = model(
snake_case_ , start_positions=snake_case_ , end_positions=snake_case_ , cls_index=snake_case_ , is_impossible=snake_case_ , p_mask=snake_case_ , )
_snake_case : Optional[Any] = model(
snake_case_ , start_positions=snake_case_ , end_positions=snake_case_ , cls_index=snake_case_ , is_impossible=snake_case_ , )
((_snake_case) , ) : Dict = result_with_labels.to_tuple()
_snake_case : Optional[Any] = model(snake_case_ , start_positions=snake_case_ , end_positions=snake_case_ )
((_snake_case) , ) : List[str] = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def lowerCamelCase__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , ):
_snake_case : Union[str, Any] = XLMForSequenceClassification(snake_case_ )
model.to(snake_case_ )
model.eval()
_snake_case : Optional[int] = model(snake_case_ )
_snake_case : Union[str, Any] = model(snake_case_ , labels=snake_case_ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def lowerCamelCase__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , ):
_snake_case : List[str] = self.num_labels
_snake_case : Union[str, Any] = XLMForTokenClassification(snake_case_ )
model.to(snake_case_ )
model.eval()
_snake_case : Optional[Any] = model(snake_case_ , attention_mask=snake_case_ , labels=snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCamelCase__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , ):
_snake_case : List[str] = self.num_choices
_snake_case : Union[str, Any] = XLMForMultipleChoice(config=snake_case_ )
model.to(snake_case_ )
model.eval()
_snake_case : int = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_snake_case : int = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_snake_case : List[str] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_snake_case : List[Any] = model(
snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ , labels=snake_case_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowerCamelCase__ ( self ):
_snake_case : Dict = self.prepare_config_and_inputs()
(
(
_snake_case
) , (
_snake_case
) , (
_snake_case
) , (
_snake_case
) , (
_snake_case
) , (
_snake_case
) , (
_snake_case
) , (
_snake_case
) , (
_snake_case
) ,
) : Any = config_and_inputs
_snake_case : str = {"input_ids": input_ids, "token_type_ids": token_type_ids, "lengths": input_lengths}
return config, inputs_dict
@require_torch
class _UpperCAmelCase ( _snake_case , _snake_case , _snake_case , unittest.TestCase):
__lowercase : Dict = (
(
XLMModel,
XLMWithLMHeadModel,
XLMForQuestionAnswering,
XLMForSequenceClassification,
XLMForQuestionAnsweringSimple,
XLMForTokenClassification,
XLMForMultipleChoice,
)
if is_torch_available()
else ()
)
__lowercase : Union[str, Any] = (
(XLMWithLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
__lowercase : Optional[int] = (
{
"""feature-extraction""": XLMModel,
"""fill-mask""": XLMWithLMHeadModel,
"""question-answering""": XLMForQuestionAnsweringSimple,
"""text-classification""": XLMForSequenceClassification,
"""text-generation""": XLMWithLMHeadModel,
"""token-classification""": XLMForTokenClassification,
"""zero-shot""": XLMForSequenceClassification,
}
if is_torch_available()
else {}
)
def lowerCamelCase__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ):
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith("Fast" )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def lowerCamelCase__ ( self , snake_case_ , snake_case_ , snake_case_=False ):
_snake_case : Optional[Any] = super()._prepare_for_class(snake_case_ , snake_case_ , return_labels=snake_case_ )
if return_labels:
if model_class.__name__ == "XLMForQuestionAnswering":
_snake_case : Any = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=snake_case_ )
_snake_case : Union[str, Any] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=snake_case_ )
return inputs_dict
def lowerCamelCase__ ( self ):
_snake_case : int = XLMModelTester(self )
_snake_case : Any = ConfigTester(self , config_class=snake_case_ , emb_dim=37 )
def lowerCamelCase__ ( self ):
self.config_tester.run_common_tests()
def lowerCamelCase__ ( self ):
_snake_case : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_model(*snake_case_ )
def lowerCamelCase__ ( self ):
_snake_case : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_lm_head(*snake_case_ )
def lowerCamelCase__ ( self ):
_snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_simple_qa(*snake_case_ )
def lowerCamelCase__ ( self ):
_snake_case : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_qa(*snake_case_ )
def lowerCamelCase__ ( self ):
_snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_sequence_classif(*snake_case_ )
def lowerCamelCase__ ( self ):
_snake_case : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_token_classif(*snake_case_ )
def lowerCamelCase__ ( self ):
_snake_case : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_for_multiple_choice(*snake_case_ )
def lowerCamelCase__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_=False , snake_case_=1 ):
self.assertIsInstance(snake_case_ , snake_case_ )
self.assertListEqual(
[isinstance(snake_case_ , snake_case_ ) for iter_attentions in attentions] , [True] * len(snake_case_ ) )
self.assertEqual(len(snake_case_ ) , (max_length - min_length) * num_beam_groups )
for idx, iter_attentions in enumerate(snake_case_ ):
# adds PAD dummy token
_snake_case : Optional[Any] = min_length + idx + 1
_snake_case : Optional[Any] = min_length + idx + 1
_snake_case : Any = (
batch_size * num_beam_groups,
config.num_attention_heads,
tgt_len,
src_len,
)
# check attn size
self.assertListEqual(
[layer_attention.shape for layer_attention in iter_attentions] , [expected_shape] * len(snake_case_ ) )
def lowerCamelCase__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_=False , snake_case_=1 ):
self.assertIsInstance(snake_case_ , snake_case_ )
self.assertListEqual(
[isinstance(snake_case_ , snake_case_ ) for iter_hidden_states in hidden_states] , [True] * len(snake_case_ ) , )
self.assertEqual(len(snake_case_ ) , (max_length - min_length) * num_beam_groups )
for idx, iter_hidden_states in enumerate(snake_case_ ):
# adds PAD dummy token
_snake_case : Dict = min_length + idx + 1
_snake_case : Optional[Any] = (batch_size * num_beam_groups, seq_len, config.hidden_size)
# check hidden size
self.assertListEqual(
[layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] , [expected_shape] * len(snake_case_ ) , )
pass
@slow
def lowerCamelCase__ ( self ):
for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_snake_case : Any = XLMModel.from_pretrained(snake_case_ )
self.assertIsNotNone(snake_case_ )
@require_torch
class _UpperCAmelCase ( unittest.TestCase):
@slow
def lowerCamelCase__ ( self ):
_snake_case : Optional[Any] = XLMWithLMHeadModel.from_pretrained("xlm-mlm-en-2048" )
model.to(snake_case_ )
_snake_case : List[Any] = torch.tensor([[14, 4_47]] , dtype=torch.long , device=snake_case_ ) # the president
_snake_case : List[Any] = [
14,
4_47,
14,
4_47,
14,
4_47,
14,
4_47,
14,
4_47,
14,
4_47,
14,
4_47,
14,
4_47,
14,
4_47,
14,
4_47,
] # the president the president the president the president the president the president the president the president the president the president
# TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
_snake_case : Any = model.generate(snake_case_ , do_sample=snake_case_ )
self.assertListEqual(output_ids[0].cpu().numpy().tolist() , snake_case_ )
| 87 |
"""simple docstring"""
from __future__ import annotations
from numpy import array, cos, cross, floataa, radians, sin
from numpy.typing import NDArray
def a__ ( a : float , a : float , a : bool = False ):
"""simple docstring"""
if radian_mode:
return [magnitude * cos(a ), magnitude * sin(a )]
return [magnitude * cos(radians(a ) ), magnitude * sin(radians(a ) )]
def a__ ( a : NDArray[floataa] , a : NDArray[floataa] , a : float = 10**-1 ):
"""simple docstring"""
_snake_case : NDArray[floataa] = cross(a , a )
_snake_case : float = sum(a )
return abs(a ) < eps
if __name__ == "__main__":
# Test to check if it works
_a : Tuple = array(
[
polar_force(718.4, 180 - 30),
polar_force(879.54, 45),
polar_force(100, -90),
]
)
_a : NDArray[floataa] = array([[0, 0], [0, 0], [0, 0]])
assert in_static_equilibrium(forces, location)
# Problem 1 in image_data/2D_problems.jpg
_a : List[Any] = array(
[
polar_force(30 * 9.81, 15),
polar_force(215, 180 - 45),
polar_force(264, 90 - 30),
]
)
_a : List[Any] = array([[0, 0], [0, 0], [0, 0]])
assert in_static_equilibrium(forces, location)
# Problem in image_data/2D_problems_1.jpg
_a : List[str] = array([[0, -2_000], [0, -1_200], [0, 15_600], [0, -12_400]])
_a : Optional[int] = array([[0, 0], [6, 0], [10, 0], [12, 0]])
assert in_static_equilibrium(forces, location)
import doctest
doctest.testmod()
| 87 | 1 |
"""simple docstring"""
import argparse
import os
import shutil
import torch
from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer
def a__ ( a : Any ):
"""simple docstring"""
_snake_case : str = args.pruning_method
_snake_case : Any = args.threshold
_snake_case : Union[str, Any] = args.model_name_or_path.rstrip("/" )
_snake_case : Dict = args.target_model_path
print(f'Load fine-pruned model from {model_name_or_path}' )
_snake_case : Dict = torch.load(os.path.join(a , "pytorch_model.bin" ) )
_snake_case : Union[str, Any] = {}
for name, tensor in model.items():
if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
_snake_case : str = tensor
print(f'Copied layer {name}' )
elif "classifier" in name or "qa_output" in name:
_snake_case : List[Any] = tensor
print(f'Copied layer {name}' )
elif "bias" in name:
_snake_case : Tuple = tensor
print(f'Copied layer {name}' )
else:
if pruning_method == "magnitude":
_snake_case : Any = MagnitudeBinarizer.apply(inputs=a , threshold=a )
_snake_case : Union[str, Any] = tensor * mask
print(f'Pruned layer {name}' )
elif pruning_method == "topK":
if "mask_scores" in name:
continue
_snake_case : List[Any] = name[:-6]
_snake_case : Any = model[f'{prefix_}mask_scores']
_snake_case : Tuple = TopKBinarizer.apply(a , a )
_snake_case : List[str] = tensor * mask
print(f'Pruned layer {name}' )
elif pruning_method == "sigmoied_threshold":
if "mask_scores" in name:
continue
_snake_case : str = name[:-6]
_snake_case : int = model[f'{prefix_}mask_scores']
_snake_case : Optional[int] = ThresholdBinarizer.apply(a , a , a )
_snake_case : List[str] = tensor * mask
print(f'Pruned layer {name}' )
elif pruning_method == "l0":
if "mask_scores" in name:
continue
_snake_case : Optional[int] = name[:-6]
_snake_case : str = model[f'{prefix_}mask_scores']
_snake_case , _snake_case : Dict = -0.1, 1.1
_snake_case : Union[str, Any] = torch.sigmoid(a )
_snake_case : List[str] = s * (r - l) + l
_snake_case : Tuple = s_bar.clamp(min=0.0 , max=1.0 )
_snake_case : str = tensor * mask
print(f'Pruned layer {name}' )
else:
raise ValueError("Unknown pruning method" )
if target_model_path is None:
_snake_case : Optional[Any] = os.path.join(
os.path.dirname(a ) , f'bertarized_{os.path.basename(a )}' )
if not os.path.isdir(a ):
shutil.copytree(a , a )
print(f'\nCreated folder {target_model_path}' )
torch.save(a , os.path.join(a , "pytorch_model.bin" ) )
print("\nPruned model saved! See you later!" )
if __name__ == "__main__":
_a : Optional[int] = argparse.ArgumentParser()
parser.add_argument(
"""--pruning_method""",
choices=["""l0""", """magnitude""", """topK""", """sigmoied_threshold"""],
type=str,
required=True,
help=(
"""Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,"""
""" sigmoied_threshold = Soft movement pruning)"""
),
)
parser.add_argument(
"""--threshold""",
type=float,
required=False,
help=(
"""For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model."""
"""For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared."""
"""Not needed for `l0`"""
),
)
parser.add_argument(
"""--model_name_or_path""",
type=str,
required=True,
help="""Folder containing the model that was previously fine-pruned""",
)
parser.add_argument(
"""--target_model_path""",
default=None,
type=str,
required=False,
help="""Folder containing the model that was previously fine-pruned""",
)
_a : Optional[int] = parser.parse_args()
main(args)
| 87 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_a : Optional[int] = logging.get_logger(__name__)
_a : str = {"""openai-gpt""": """https://huggingface.co/openai-gpt/resolve/main/config.json"""}
class _UpperCAmelCase ( _snake_case):
__lowercase : Optional[Any] = """openai-gpt"""
__lowercase : Dict = {
"""max_position_embeddings""": """n_positions""",
"""hidden_size""": """n_embd""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
def __init__( self , snake_case_=4_04_78 , snake_case_=5_12 , snake_case_=7_68 , snake_case_=12 , snake_case_=12 , snake_case_="gelu" , snake_case_=0.1 , snake_case_=0.1 , snake_case_=0.1 , snake_case_=1E-5 , snake_case_=0.02 , snake_case_="cls_index" , snake_case_=True , snake_case_=None , snake_case_=True , snake_case_=0.1 , **snake_case_ , ):
_snake_case : Tuple = vocab_size
_snake_case : Dict = n_positions
_snake_case : Any = n_embd
_snake_case : Any = n_layer
_snake_case : Optional[int] = n_head
_snake_case : Union[str, Any] = afn
_snake_case : Dict = resid_pdrop
_snake_case : str = embd_pdrop
_snake_case : Union[str, Any] = attn_pdrop
_snake_case : str = layer_norm_epsilon
_snake_case : Union[str, Any] = initializer_range
_snake_case : Any = summary_type
_snake_case : List[str] = summary_use_proj
_snake_case : Optional[int] = summary_activation
_snake_case : Union[str, Any] = summary_first_dropout
_snake_case : Optional[int] = summary_proj_to_labels
super().__init__(**snake_case_ )
| 87 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_a : List[Any] = {"""tokenization_bertweet""": ["""BertweetTokenizer"""]}
if TYPE_CHECKING:
from .tokenization_bertweet import BertweetTokenizer
else:
import sys
_a : Union[str, Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 87 |
"""simple docstring"""
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision.transforms import functional as F
from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection
from transformers.utils import logging
logging.set_verbosity_info()
# Bind the module logger under the name the rest of this script actually uses
# (`logger.info(...)` in the conversion function); the original bound it to
# `_a`, which left `logger` undefined at call time.
logger = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
# NOTE: the conversion loop below iterates `for src, dest in rename_keys`, so
# the list must be bound to the name `rename_keys`.
rename_keys = []
for i in range(6):
    # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
    rename_keys.append(
        (f"transformer.encoder.layers.{i}.self_attn.out_proj.weight", f"encoder.layers.{i}.self_attn.out_proj.weight")
    )
    rename_keys.append(
        (f"transformer.encoder.layers.{i}.self_attn.out_proj.bias", f"encoder.layers.{i}.self_attn.out_proj.bias")
    )
    rename_keys.append((f"transformer.encoder.layers.{i}.linear1.weight", f"encoder.layers.{i}.fc1.weight"))
    rename_keys.append((f"transformer.encoder.layers.{i}.linear1.bias", f"encoder.layers.{i}.fc1.bias"))
    rename_keys.append((f"transformer.encoder.layers.{i}.linear2.weight", f"encoder.layers.{i}.fc2.weight"))
    rename_keys.append((f"transformer.encoder.layers.{i}.linear2.bias", f"encoder.layers.{i}.fc2.bias"))
    rename_keys.append(
        (f"transformer.encoder.layers.{i}.norm1.weight", f"encoder.layers.{i}.self_attn_layer_norm.weight")
    )
    rename_keys.append((f"transformer.encoder.layers.{i}.norm1.bias", f"encoder.layers.{i}.self_attn_layer_norm.bias"))
    rename_keys.append((f"transformer.encoder.layers.{i}.norm2.weight", f"encoder.layers.{i}.final_layer_norm.weight"))
    rename_keys.append((f"transformer.encoder.layers.{i}.norm2.bias", f"encoder.layers.{i}.final_layer_norm.bias"))
    # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.self_attn.out_proj.weight", f"decoder.layers.{i}.self_attn.out_proj.weight")
    )
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.self_attn.out_proj.bias", f"decoder.layers.{i}.self_attn.out_proj.bias")
    )
    rename_keys.append(
        (
            f"transformer.decoder.layers.{i}.multihead_attn.out_proj.weight",
            f"decoder.layers.{i}.encoder_attn.out_proj.weight",
        )
    )
    rename_keys.append(
        (
            f"transformer.decoder.layers.{i}.multihead_attn.out_proj.bias",
            f"decoder.layers.{i}.encoder_attn.out_proj.bias",
        )
    )
    rename_keys.append((f"transformer.decoder.layers.{i}.linear1.weight", f"decoder.layers.{i}.fc1.weight"))
    rename_keys.append((f"transformer.decoder.layers.{i}.linear1.bias", f"decoder.layers.{i}.fc1.bias"))
    rename_keys.append((f"transformer.decoder.layers.{i}.linear2.weight", f"decoder.layers.{i}.fc2.weight"))
    rename_keys.append((f"transformer.decoder.layers.{i}.linear2.bias", f"decoder.layers.{i}.fc2.bias"))
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.norm1.weight", f"decoder.layers.{i}.self_attn_layer_norm.weight")
    )
    rename_keys.append((f"transformer.decoder.layers.{i}.norm1.bias", f"decoder.layers.{i}.self_attn_layer_norm.bias"))
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.norm2.weight", f"decoder.layers.{i}.encoder_attn_layer_norm.weight")
    )
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.norm2.bias", f"decoder.layers.{i}.encoder_attn_layer_norm.bias")
    )
    rename_keys.append((f"transformer.decoder.layers.{i}.norm3.weight", f"decoder.layers.{i}.final_layer_norm.weight"))
    rename_keys.append((f"transformer.decoder.layers.{i}.norm3.bias", f"decoder.layers.{i}.final_layer_norm.bias"))
# convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads
rename_keys.extend(
    [
        ("input_proj.weight", "input_projection.weight"),
        ("input_proj.bias", "input_projection.bias"),
        ("query_embed.weight", "query_position_embeddings.weight"),
        ("transformer.encoder.norm.weight", "encoder.layernorm.weight"),
        ("transformer.encoder.norm.bias", "encoder.layernorm.bias"),
        ("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
        ("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
        ("class_embed.weight", "class_labels_classifier.weight"),
        ("class_embed.bias", "class_labels_classifier.bias"),
        ("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
        ("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
        ("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
        ("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
        ("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
        ("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
    ]
)
def rename_key(state_dict, old, new):
    """Move ``state_dict[old]`` to ``state_dict[new]`` in place."""
    val = state_dict.pop(old)
    state_dict[new] = val
def rename_backbone_keys(state_dict):
    """
    Return an OrderedDict copy of ``state_dict`` (preserving key order) where
    every key under the torchvision prefix ``backbone.0.body`` is moved to the
    HF prefix ``backbone.conv_encoder.model``. Other keys are kept verbatim.
    """
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict
def read_in_q_k_v(state_dict):
    """
    Split the fused PyTorch ``in_proj`` attention parameters of the original
    checkpoint into the separate query/key/value projections expected by the
    HF implementation. Mutates ``state_dict`` in place.

    The fused tensors stack q/k/v along dim 0: rows [0:256] are the query
    projection, [256:512] the key projection, and the last 256 the value
    projection (hidden size is 256).
    """
    prefix = ""
    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight")
        in_proj_bias_cross_attn = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.weight"] = in_proj_weight_cross_attn[:256, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.bias"] = in_proj_bias_cross_attn[:256]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.weight"] = in_proj_weight_cross_attn[256:512, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.bias"] = in_proj_bias_cross_attn[256:512]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.weight"] = in_proj_weight_cross_attn[-256:, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.bias"] = in_proj_bias_cross_attn[-256:]
def resize(image, checkpoint_url):
    """
    Resize a PIL image so its longer side becomes 800 pixels for detection
    checkpoints and 1000 pixels otherwise, preserving the aspect ratio.
    """
    width, height = image.size
    current_max_size = max(width, height)
    target_max_size = 800 if "detection" in checkpoint_url else 1_000
    scale = target_max_size / current_max_size
    resized_image = image.resize((int(round(scale * width)), int(round(scale * height))))
    return resized_image
def normalize(image):
    """Convert a PIL image to a tensor and apply ImageNet mean/std normalization."""
    image = F.to_tensor(image)
    image = F.normalize(image, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    return image
@torch.no_grad()
def convert_table_transformer_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub):
    """
    Copy/paste/tweak the original Table Transformer checkpoint weights into our
    HF structure, verify the converted model on an example image, and
    optionally save it to disk and/or push it to the hub.

    Args:
        checkpoint_url: URL of the original checkpoint ("detection" vs.
            structure recognition is inferred from the URL).
        pytorch_dump_folder_path: folder where the converted model and image
            processor are saved, or None to skip saving.
        push_to_hub: whether to push the converted artifacts to the HF hub.
    """
    logger.info("Converting model...")
    # load original state dict
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    # rename keys
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "model."
    for key in state_dict.copy().keys():
        if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
            val = state_dict.pop(key)
            state_dict[prefix + key] = val
    # create HuggingFace model and load state dict
    config = TableTransformerConfig(
        backbone="resnet18",
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        ce_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.4,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
    )
    if "detection" in checkpoint_url:
        config.num_queries = 15
        config.num_labels = 2
        id2label = {0: "table", 1: "table rotated"}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    else:
        config.num_queries = 125
        config.num_labels = 6
        id2label = {
            0: "table",
            1: "table column",
            2: "table row",
            3: "table column header",
            4: "table projected row header",
            5: "table spanning cell",
        }
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    image_processor = DetrImageProcessor(
        format="coco_detection", max_size=800 if "detection" in checkpoint_url else 1_000)
    model = TableTransformerForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    # verify our conversion
    filename = "example_pdf.png" if "detection" in checkpoint_url else "example_table.png"
    file_path = hf_hub_download(repo_id="nielsr/example-pdf", repo_type="dataset", filename=filename)
    image = Image.open(file_path).convert("RGB")
    pixel_values = normalize(resize(image, checkpoint_url)).unsqueeze(0)
    outputs = model(pixel_values)
    if "detection" in checkpoint_url:
        expected_shape = (1, 15, 3)
        expected_logits = torch.tensor(
            [[-6.7897, -16.9985, 6.7937], [-8.0186, -22.2192, 6.9677], [-7.3117, -21.0708, 7.4055]])
        expected_boxes = torch.tensor([[0.4867, 0.1767, 0.6732], [0.6718, 0.4479, 0.3830], [0.4716, 0.1760, 0.6364]])
    else:
        expected_shape = (1, 125, 7)
        expected_logits = torch.tensor(
            [[-18.1430, -8.3214, 4.8274], [-18.4685, -7.1361, -4.2667], [-26.3693, -9.3429, -4.9962]])
        expected_boxes = torch.tensor([[0.4983, 0.5595, 0.9440], [0.4916, 0.6315, 0.5954], [0.6108, 0.8637, 0.1135]])
    assert outputs.logits.shape == expected_shape
    assert torch.allclose(outputs.logits[0, :3, :3], expected_logits, atol=1e-4)
    assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes, atol=1e-4)
    print("Looks ok!")
    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(f'Saving PyTorch model and image processor to {pytorch_dump_folder_path}...')
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        image_processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        # Push model to HF hub
        logger.info("Pushing model to the hub...")
        model_name = (
            "microsoft/table-transformer-detection"
            if "detection" in checkpoint_url
            else "microsoft/table-transformer-structure-recognition"
        )
        model.push_to_hub(model_name)
        image_processor.push_to_hub(model_name)
if __name__ == "__main__":
    # Command-line entry point: the parser and parsed namespace must be bound
    # to `parser`/`args` — the names the statements below actually use.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--checkpoint_url",
        default="https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth",
        type=str,
        choices=[
            "https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth",
            "https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth",
        ],
        help="URL of the Table Transformer checkpoint you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )
    args = parser.parse_args()
    convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 87 | 1 |
"""simple docstring"""
import argparse
import os
from . import (
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
AlbertConfig,
BartConfig,
BertConfig,
CamembertConfig,
CTRLConfig,
DistilBertConfig,
DPRConfig,
ElectraConfig,
FlaubertConfig,
GPTaConfig,
LayoutLMConfig,
LxmertConfig,
OpenAIGPTConfig,
RobertaConfig,
TaConfig,
TFAlbertForPreTraining,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFCamembertForMaskedLM,
TFCTRLLMHeadModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
TFElectraForPreTraining,
TFFlaubertWithLMHeadModel,
TFGPTaLMHeadModel,
TFLayoutLMForMaskedLM,
TFLxmertForPreTraining,
TFLxmertVisualFeatureEncoder,
TFOpenAIGPTLMHeadModel,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForSequenceClassification,
TFTaForConditionalGeneration,
TFTransfoXLLMHeadModel,
TFWavaVecaModel,
TFXLMRobertaForMaskedLM,
TFXLMWithLMHeadModel,
TFXLNetLMHeadModel,
TransfoXLConfig,
WavaVecaConfig,
WavaVecaModel,
XLMConfig,
XLMRobertaConfig,
XLNetConfig,
is_torch_available,
load_pytorch_checkpoint_in_tfa_model,
)
from .utils import CONFIG_NAME, WEIGHTS_NAME, cached_file, logging
if is_torch_available():
import numpy as np
import torch
from . import (
AlbertForPreTraining,
BartForConditionalGeneration,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
CamembertForMaskedLM,
CTRLLMHeadModel,
DistilBertForMaskedLM,
DistilBertForQuestionAnswering,
DPRContextEncoder,
DPRQuestionEncoder,
DPRReader,
ElectraForPreTraining,
FlaubertWithLMHeadModel,
GPTaLMHeadModel,
LayoutLMForMaskedLM,
LxmertForPreTraining,
LxmertVisualFeatureEncoder,
OpenAIGPTLMHeadModel,
RobertaForMaskedLM,
RobertaForSequenceClassification,
TaForConditionalGeneration,
TransfoXLLMHeadModel,
XLMRobertaForMaskedLM,
XLMWithLMHeadModel,
XLNetLMHeadModel,
)
logging.set_verbosity_info()

# Registry of every convertible architecture.
# NOTE: the conversion functions below look this table up as `MODEL_CLASSES`
# (e.g. `if model_type not in MODEL_CLASSES`), so it must be bound to that
# name. Each value is (config class, TF model class(es), PyTorch model
# class(es), pretrained archive map(s)); entry lengths vary per architecture.
MODEL_CLASSES = {
    "bart": (
        BartConfig,
        TFBartForConditionalGeneration,
        TFBartForSequenceClassification,
        BartForConditionalGeneration,
        BART_PRETRAINED_MODEL_ARCHIVE_LIST,
    ),
    "bert": (
        BertConfig,
        TFBertForPreTraining,
        BertForPreTraining,
        BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "bert-large-uncased-whole-word-masking-finetuned-squad": (
        BertConfig,
        TFBertForQuestionAnswering,
        BertForQuestionAnswering,
        BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "bert-large-cased-whole-word-masking-finetuned-squad": (
        BertConfig,
        TFBertForQuestionAnswering,
        BertForQuestionAnswering,
        BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "bert-base-cased-finetuned-mrpc": (
        BertConfig,
        TFBertForSequenceClassification,
        BertForSequenceClassification,
        BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "dpr": (
        DPRConfig,
        TFDPRQuestionEncoder,
        TFDPRContextEncoder,
        TFDPRReader,
        DPRQuestionEncoder,
        DPRContextEncoder,
        DPRReader,
        DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
        DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
        DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
    ),
    "gpt2": (
        GPTaConfig,
        TFGPTaLMHeadModel,
        GPTaLMHeadModel,
        GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "xlnet": (
        XLNetConfig,
        TFXLNetLMHeadModel,
        XLNetLMHeadModel,
        XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "xlm": (
        XLMConfig,
        TFXLMWithLMHeadModel,
        XLMWithLMHeadModel,
        XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "xlm-roberta": (
        XLMRobertaConfig,
        TFXLMRobertaForMaskedLM,
        XLMRobertaForMaskedLM,
        XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "transfo-xl": (
        TransfoXLConfig,
        TFTransfoXLLMHeadModel,
        TransfoXLLMHeadModel,
        TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "openai-gpt": (
        OpenAIGPTConfig,
        TFOpenAIGPTLMHeadModel,
        OpenAIGPTLMHeadModel,
        OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "roberta": (
        RobertaConfig,
        TFRobertaForCausalLM,
        TFRobertaForMaskedLM,
        RobertaForMaskedLM,
        ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "layoutlm": (
        LayoutLMConfig,
        TFLayoutLMForMaskedLM,
        LayoutLMForMaskedLM,
        LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
    ),
    "roberta-large-mnli": (
        RobertaConfig,
        TFRobertaForSequenceClassification,
        RobertaForSequenceClassification,
        ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "camembert": (
        CamembertConfig,
        TFCamembertForMaskedLM,
        CamembertForMaskedLM,
        CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "flaubert": (
        FlaubertConfig,
        TFFlaubertWithLMHeadModel,
        FlaubertWithLMHeadModel,
        FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "distilbert": (
        DistilBertConfig,
        TFDistilBertForMaskedLM,
        DistilBertForMaskedLM,
        DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "distilbert-base-distilled-squad": (
        DistilBertConfig,
        TFDistilBertForQuestionAnswering,
        DistilBertForQuestionAnswering,
        DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "lxmert": (
        LxmertConfig,
        TFLxmertForPreTraining,
        LxmertForPreTraining,
        LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "lxmert-visual-feature-encoder": (
        LxmertConfig,
        TFLxmertVisualFeatureEncoder,
        LxmertVisualFeatureEncoder,
        LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "ctrl": (
        CTRLConfig,
        TFCTRLLMHeadModel,
        CTRLLMHeadModel,
        CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "albert": (
        AlbertConfig,
        TFAlbertForPreTraining,
        AlbertForPreTraining,
        ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "t5": (
        TaConfig,
        TFTaForConditionalGeneration,
        TaForConditionalGeneration,
        T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "electra": (
        ElectraConfig,
        TFElectraForPreTraining,
        ElectraForPreTraining,
        ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "wav2vec2": (
        WavaVecaConfig,
        TFWavaVecaModel,
        WavaVecaModel,
        WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
}
def convert_pt_checkpoint_to_tf(
    model_type, pytorch_checkpoint_path, config_file, tf_dump_path, compare_with_pt_model=False, use_cached_models=True
):
    """
    Convert a single PyTorch checkpoint to a TensorFlow ``.h5`` weights file.

    Args:
        model_type: key into ``MODEL_CLASSES`` selecting the architecture.
        pytorch_checkpoint_path: local path or shortcut name of the checkpoint.
        config_file: local path or shortcut name of the model config.
        tf_dump_path: output path for the TF weights file.
        compare_with_pt_model: if True, run both models on dummy inputs and
            assert the maximum absolute output difference is <= 2e-2.
        use_cached_models: reuse previously downloaded files when possible.

    Raises:
        ValueError: if ``model_type`` is not a known architecture.
    """
    if model_type not in MODEL_CLASSES:
        raise ValueError(f'Unrecognized model type, should be one of {list(MODEL_CLASSES.keys() )}.')
    config_class, model_class, pt_model_class, aws_config_map = MODEL_CLASSES[model_type]
    # Initialise TF model
    if config_file in aws_config_map:
        config_file = cached_file(config_file, CONFIG_NAME, force_download=not use_cached_models)
    config = config_class.from_json_file(config_file)
    config.output_hidden_states = True
    config.output_attentions = True
    print(f'Building TensorFlow model from configuration: {config}')
    tf_model = model_class(config)
    # Load weights from tf checkpoint
    if pytorch_checkpoint_path in aws_config_map.keys():
        pytorch_checkpoint_path = cached_file(
            pytorch_checkpoint_path, WEIGHTS_NAME, force_download=not use_cached_models)
    # Load PyTorch checkpoint in tf2 model:
    tf_model = load_pytorch_checkpoint_in_tfa_model(tf_model, pytorch_checkpoint_path)
    if compare_with_pt_model:
        tfo = tf_model(tf_model.dummy_inputs, training=False)  # build the network
        state_dict = torch.load(pytorch_checkpoint_path, map_location="cpu")
        pt_model = pt_model_class.from_pretrained(
            pretrained_model_name_or_path=None, config=config, state_dict=state_dict)
        with torch.no_grad():
            pto = pt_model(**pt_model.dummy_inputs)
        np_pt = pto[0].numpy()
        np_tf = tfo[0].numpy()
        diff = np.amax(np.abs(np_pt - np_tf))
        print(f'Max absolute difference between models outputs {diff}')
        assert diff <= 2e-2, f'Error, model absolute difference is >2e-2: {diff}'
    # Save pytorch-model
    print(f'Save TensorFlow model to {tf_dump_path}')
    tf_model.save_weights(tf_dump_path, save_format="h5")
def convert_all_pt_checkpoints_to_tf(
    args_model_type,
    tf_dump_path,
    model_shortcut_names_or_path=None,
    config_shortcut_names_or_path=None,
    compare_with_pt_model=False,
    use_cached_models=False,
    remove_cached_files=False,
    only_convert_finetuned_models=False,
):
    """
    Convert one or all model types to TensorFlow checkpoints.

    Args:
        args_model_type: a single ``MODEL_CLASSES`` key, or None to convert
            every registered model type.
        tf_dump_path: folder where the converted ``*-tf_model.h5`` files go.
        model_shortcut_names_or_path: checkpoints to convert; defaults to all
            checkpoints of the selected architecture.
        config_shortcut_names_or_path: configs matching the checkpoints;
            defaults to the checkpoint names themselves.
        compare_with_pt_model / use_cached_models: forwarded to
            ``convert_pt_checkpoint_to_tf``.
        remove_cached_files: delete the downloaded PyTorch files afterwards.
        only_convert_finetuned_models: restrict to (or skip) finetuned
            checkpoints (names containing "-squad"/"-mrpc"/"-mnli").
    """
    if args_model_type is None:
        model_types = list(MODEL_CLASSES.keys())
    else:
        model_types = [args_model_type]
    for j, model_type in enumerate(model_types, start=1):
        print("=" * 100)
        print(f' Converting model type {j}/{len(model_types )}: {model_type}')
        print("=" * 100)
        if model_type not in MODEL_CLASSES:
            raise ValueError(f'Unrecognized model type {model_type}, should be one of {list(MODEL_CLASSES.keys() )}.')
        # NOTE(review): entries in MODEL_CLASSES above mostly carry a single
        # archive map; the 5-way unpack mirrors the surrounding code — confirm
        # against the registry when running for real.
        config_class, model_class, pt_model_class, aws_model_maps, aws_config_map = MODEL_CLASSES[model_type]
        if model_shortcut_names_or_path is None:
            model_shortcut_names_or_path = list(aws_model_maps.keys())
        if config_shortcut_names_or_path is None:
            config_shortcut_names_or_path = model_shortcut_names_or_path
        for i, (model_shortcut_name, config_shortcut_name) in enumerate(
            zip(model_shortcut_names_or_path, config_shortcut_names_or_path), start=1):
            print("-" * 100)
            if "-squad" in model_shortcut_name or "-mrpc" in model_shortcut_name or "-mnli" in model_shortcut_name:
                if not only_convert_finetuned_models:
                    print(f' Skipping finetuned checkpoint {model_shortcut_name}')
                    continue
                model_type = model_shortcut_name
            elif only_convert_finetuned_models:
                print(f' Skipping not finetuned checkpoint {model_shortcut_name}')
                continue
            print(
                f' Converting checkpoint {i}/{len(model_shortcut_names_or_path )}: {model_shortcut_name} - model_type {model_type}')
            print("-" * 100)
            if config_shortcut_name in aws_config_map:
                config_file = cached_file(config_shortcut_name, CONFIG_NAME, force_download=not use_cached_models)
            else:
                config_file = config_shortcut_name
            if model_shortcut_name in aws_model_maps:
                model_file = cached_file(model_shortcut_name, WEIGHTS_NAME, force_download=not use_cached_models)
            else:
                model_file = model_shortcut_name
            if os.path.isfile(model_shortcut_name):
                model_shortcut_name = "converted_model"
            convert_pt_checkpoint_to_tf(
                model_type=model_type,
                pytorch_checkpoint_path=model_file,
                config_file=config_file,
                tf_dump_path=os.path.join(tf_dump_path, model_shortcut_name + "-tf_model.h5"),
                compare_with_pt_model=compare_with_pt_model,
            )
            if remove_cached_files:
                os.remove(config_file)
                os.remove(model_file)
if __name__ == "__main__":
    # Command-line entry point: parser/namespace must be bound to the names
    # `parser`/`args` that the statements below actually reference.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_dump_path", default=None, type=str, required=True, help="Path to the output Tensorflow dump file."
    )
    parser.add_argument(
        "--model_type",
        default=None,
        type=str,
        help=(
            f'Model type selected in the list of {list(MODEL_CLASSES.keys())}. If not given, will download and '
            "convert all the models from AWS."
        ),
    )
    parser.add_argument(
        "--pytorch_checkpoint_path",
        default=None,
        type=str,
        help=(
            "Path to the PyTorch checkpoint path or shortcut name to download from AWS. "
            "If not given, will download and convert all the checkpoints from AWS."
        ),
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        help=(
            "The config json file corresponding to the pre-trained model. \n"
            "This specifies the model architecture. If not given and "
            "--pytorch_checkpoint_path is not given or is a shortcut name "
            "use the configuration associated to the shortcut name on the AWS"
        ),
    )
    parser.add_argument(
        "--compare_with_pt_model", action="store_true", help="Compare Tensorflow and PyTorch model predictions."
    )
    parser.add_argument(
        "--use_cached_models",
        action="store_true",
        help="Use cached models if possible instead of updating to latest checkpoint versions.",
    )
    parser.add_argument(
        "--remove_cached_files",
        action="store_true",
        help="Remove pytorch models after conversion (save memory when converting in batches).",
    )
    parser.add_argument("--only_convert_finetuned_models", action="store_true", help="Only convert finetuned models.")
    args = parser.parse_args()

    # if args.pytorch_checkpoint_path is not None:
    #     convert_pt_checkpoint_to_tf(args.model_type.lower(),
    #                                 args.pytorch_checkpoint_path,
    #                                 args.config_file if args.config_file is not None else args.pytorch_checkpoint_path,
    #                                 args.tf_dump_path,
    #                                 compare_with_pt_model=args.compare_with_pt_model,
    #                                 use_cached_models=args.use_cached_models)
    # else:
    convert_all_pt_checkpoints_to_tf(
        args.model_type.lower() if args.model_type is not None else None,
        args.tf_dump_path,
        model_shortcut_names_or_path=[args.pytorch_checkpoint_path]
        if args.pytorch_checkpoint_path is not None
        else None,
        config_shortcut_names_or_path=[args.config_file] if args.config_file is not None else None,
        compare_with_pt_model=args.compare_with_pt_model,
        use_cached_models=args.use_cached_models,
        remove_cached_files=args.remove_cached_files,
        only_convert_finetuned_models=args.only_convert_finetuned_models,
    )
| 87 |
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(""">=""", """4.25.0""")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
| 87 | 1 |
"""simple docstring"""
def pancake_sort(arr):
    """
    Sort a list of comparable items in ascending order using pancake sort.

    Repeatedly flips the prefix containing the current maximum to the front,
    then flips it into its final position. Returns the sorted list (the input
    list object is not mutated; slicing rebinds ``arr`` locally).
    """
    cur = len(arr)
    while cur > 1:
        # Find the index of the maximum within the unsorted prefix
        mi = arr.index(max(arr[0:cur]))
        # Reverse from 0 to mi, bringing the maximum to the front
        arr = arr[mi::-1] + arr[mi + 1 : len(arr)]
        # Reverse the unsorted prefix, moving the maximum into place
        arr = arr[cur - 1 :: -1] + arr[cur : len(arr)]
        cur -= 1
    return arr
if __name__ == "__main__":
    # Read a comma-separated list of integers from stdin and print it sorted.
    # The bindings must use the names `user_input`/`unsorted` read below.
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(pancake_sort(unsorted))
| 87 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
# NOTE(review): this logger binding is immediately clobbered by the next
# statement (both are bound to `_a`); the original file presumably named it
# `logger` — confirm before relying on it.
_a : Optional[int] = logging.get_logger(__name__)
# Map from checkpoint identifier to the URL of its hosted configuration file.
_a : dict = {
    """facebook/convnextv2-tiny-1k-224""": """https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json""",
}
class _UpperCAmelCase(_snake_case, _snake_case):
    """
    Configuration for the ConvNeXT V2 model.

    Stores the architecture hyper-parameters (stage depths, hidden sizes, ...)
    and computes the backbone stage names and aligned output features/indices.
    """

    # Identifier used by the auto-config machinery.
    model_type = "convnextv2"

    def __init__(
        self,
        num_channels=3,  # number of input image channels
        patch_size=4,  # size of the patchify stem convolution
        num_stages=4,  # number of backbone stages
        hidden_sizes=None,  # per-stage hidden sizes; defaults below
        depths=None,  # per-stage block counts; defaults below
        hidden_act="gelu",  # activation function name
        initializer_range=0.02,  # stddev for weight initialization
        layer_norm_eps=1e-12,  # epsilon used by LayerNorm
        drop_path_rate=0.0,  # stochastic depth rate
        image_size=224,  # expected input resolution
        out_features=None,  # backbone feature names to expose
        out_indices=None,  # backbone stage indices to expose
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_stages = num_stages
        self.hidden_sizes = [96, 192, 384, 768] if hidden_sizes is None else hidden_sizes
        self.depths = [3, 3, 9, 3] if depths is None else depths
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.drop_path_rate = drop_path_rate
        self.image_size = image_size
        # "stem" followed by one name per stage: stage1 ... stageN.
        self.stage_names = ["stem"] + [f'stage{idx}' for idx in range(1, len(self.depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names)
| 87 | 1 |
"""simple docstring"""
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
else:
class _UpperCAmelCase :
@staticmethod
def lowerCamelCase__ ( *snake_case_ , **snake_case_ ):
pass
def hashimage(image):
    """Return the hexadecimal MD5 digest of a PIL image's raw pixel bytes."""
    # The original called the non-existent `hashlib.mda` and discarded the
    # result while returning `m.hexdigest()`; `md5` is the intended digest.
    m = hashlib.md5(image.tobytes())
    return m.hexdigest()
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class _UpperCAmelCase(unittest.TestCase):
    """
    Pipeline tests for depth estimation. The original block defined every
    method under the same name, so all but the last were silently discarded;
    the canonical method names are restored here.
    """

    # Model architectures covered by this pipeline test.
    model_mapping = MODEL_FOR_DEPTH_ESTIMATION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        """Build the pipeline under test plus a couple of example inputs."""
        depth_estimator = DepthEstimationPipeline(model=model, image_processor=processor)
        return depth_estimator, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]

    def run_pipeline_test(self, depth_estimator, examples):
        """Run the pipeline on single and batched images of various modes."""
        outputs = depth_estimator("./tests/fixtures/tests_samples/COCO/000000039769.png")
        self.assertEqual({"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)}, outputs)
        import datasets

        dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils", "image", split="test")
        outputs = depth_estimator(
            [
                Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                # RGBA
                dataset[0]["file"],
                # LA
                dataset[1]["file"],
                # L
                dataset[2]["file"],
            ])
        self.assertEqual(
            [
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
            ], outputs, )

    @require_tf
    @unittest.skip("Depth estimation is not implemented in TF")
    def test_small_model_tf(self):
        pass

    @slow
    @require_torch
    def test_large_model_pt(self):
        model_id = "Intel/dpt-large"
        depth_estimator = pipeline("depth-estimation", model=model_id)
        outputs = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
        outputs["depth"] = hashimage(outputs["depth"])
        # This seems flaky.
        # self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
        self.assertEqual(nested_simplify(outputs["predicted_depth"].max().item()), 29.304)
        self.assertEqual(nested_simplify(outputs["predicted_depth"].min().item()), 2.662)

    @require_torch
    def test_small_model_pt(self):
        # This is highly irregular to have no small tests.
        self.skipTest("There is not hf-internal-testing tiny model for either GLPN nor DPT")
| 87 |
"""simple docstring"""
from argparse import ArgumentParser, Namespace
from ..utils import logging
from . import BaseTransformersCLICommand
def convert_command_factory(args: Namespace):
    """
    Factory building the conversion command from parsed CLI arguments.

    The original parameter was named `a` while the body read `args`, raising a
    NameError on every call; the parameter is renamed to match.
    NOTE(review): `ConvertCommand` is not defined under that name in the
    visible part of this file — confirm the class binding.
    """
    return ConvertCommand(
        args.model_type, args.tf_checkpoint, args.pytorch_dump_output, args.config, args.finetuning_task_name)
# Error message raised when a conversion needs TensorFlow but it is missing.
_a : str = """
transformers can only be used from the commandline to convert TensorFlow models in PyTorch, In that case, it requires
TensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.
"""
class _UpperCAmelCase ( _snake_case):
    """CLI command that converts an original (mostly TensorFlow) checkpoint of a
    supported architecture into a Transformers PyTorch checkpoint.

    The per-architecture conversion modules are imported lazily inside ``run``
    so that TensorFlow is only required when a conversion is actually requested.
    """

    @staticmethod
    def lowerCamelCase__ ( snake_case_ ):
        """Register the ``convert`` sub-command on *snake_case_* (an argparse
        subparsers object) and wire its defaults to the command factory."""
        # Fixed: the subparser must come from the *snake_case_* argument (the
        # original assigned it to a throwaway name and then used an undefined
        # ``parser``); argument types/required flags restored from their help
        # texts (the obfuscation had replaced them with the parser object).
        train_parser = snake_case_.add_parser(
            "convert" , help="CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints." , )
        train_parser.add_argument("--model_type" , type=str , required=True , help="Model's type." )
        train_parser.add_argument(
            "--tf_checkpoint" , type=str , required=True , help="TensorFlow checkpoint path or folder." )
        train_parser.add_argument(
            "--pytorch_dump_output" , type=str , required=True , help="Path to the PyTorch saved model output." )
        train_parser.add_argument("--config" , type=str , default="" , help="Configuration file path or folder." )
        train_parser.add_argument(
            "--finetuning_task_name" , type=str , default=None , help="Optional fine-tuning task name if the TF model was a finetuned model." , )
        # Fixed: defaults must point at the command factory (module function
        # ``a__`` above), not at the parser itself.
        train_parser.set_defaults(func=a__ )

    def __init__( self , model_type , tf_checkpoint , pytorch_dump_output , config , finetuning_task_name , *args ):
        """Store the conversion parameters.  Extra positional *args are ignored.

        Fixed: the original signature repeated one obfuscated parameter name
        five times (a SyntaxError); names restored from the factory call above.
        """
        self._logger = logging.get_logger("transformers-cli/converting" )
        self._logger.info(F'Loading model {model_type}' )
        self._model_type = model_type
        self._tf_checkpoint = tf_checkpoint
        self._pytorch_dump_output = pytorch_dump_output
        self._config = config
        self._finetuning_task_name = finetuning_task_name

    def lowerCamelCase__ ( self ):
        """Dispatch to the architecture-specific conversion script and write the
        PyTorch checkpoint to ``self._pytorch_dump_output``.

        Raises:
            ImportError: if TensorFlow is required but not installed (``_a`` is
                the module-level explanation message).
            ValueError: if ``self._model_type`` is not a supported architecture.
        """
        if self._model_type == "albert":
            try:
                from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import (
                    convert_tf_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(_a )
            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
        elif self._model_type == "bert":
            try:
                from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import (
                    convert_tf_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(_a )
            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
        elif self._model_type == "funnel":
            try:
                from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import (
                    convert_tf_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(_a )
            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
        elif self._model_type == "t5":
            try:
                from ..models.ta.convert_ta_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
            except ImportError:
                raise ImportError(_a )
            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
        elif self._model_type == "gpt":
            from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import (
                convert_openai_checkpoint_to_pytorch,
            )

            convert_openai_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
        elif self._model_type == "transfo_xl":
            try:
                from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import (
                    convert_transfo_xl_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(_a )
            # Fixed: the two branches were identical after obfuscation.  A path
            # containing "ckpt" is the TF checkpoint; anything else is treated
            # as the dataset file (the converter's 4th positional argument).
            if "ckpt" in self._tf_checkpoint.lower():
                tf_checkpoint = self._tf_checkpoint
                tf_dataset_file = ""
            else:
                tf_dataset_file = self._tf_checkpoint
                tf_checkpoint = ""
            convert_transfo_xl_checkpoint_to_pytorch(
                tf_checkpoint , self._config , self._pytorch_dump_output , tf_dataset_file )
        elif self._model_type == "gpt2":
            try:
                from ..models.gpta.convert_gpta_original_tf_checkpoint_to_pytorch import (
                    convert_gpta_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(_a )
            convert_gpta_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
        elif self._model_type == "xlnet":
            try:
                from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import (
                    convert_xlnet_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(_a )
            convert_xlnet_checkpoint_to_pytorch(
                self._tf_checkpoint , self._config , self._pytorch_dump_output , self._finetuning_task_name )
        elif self._model_type == "xlm":
            from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import (
                convert_xlm_checkpoint_to_pytorch,
            )

            convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
        elif self._model_type == "lxmert":
            from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import (
                convert_lxmert_checkpoint_to_pytorch,
            )

            convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
        elif self._model_type == "rembert":
            from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import (
                convert_rembert_tf_checkpoint_to_pytorch,
            )

            convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
        else:
            # Fixed: the message now lists every architecture handled above.
            raise ValueError(
                "--model_type should be selected in the list [albert, bert, funnel, gpt, gpt2, t5, transfo_xl, xlnet, xlm, lxmert, rembert]" )
| 87 | 1 |
"""simple docstring"""
import re
def a__ ( a : str ):
    """Return the complementary DNA strand of *a* (A<->T, C<->G).

    Args:
        a: DNA sequence made up only of the characters ``A``, ``T``, ``C``, ``G``.

    Raises:
        ValueError: if *a* contains any other character.
    """
    if len(re.findall("[ATCG]" , a ) ) != len(a ):
        raise ValueError("Invalid Strand" )
    # Fixed: the original body read an undefined name ``dna``; the parameter
    # is ``a``.  str.maketrans builds the one-pass complement table.
    return a.translate(str.maketrans("ATCG" , "TAGC" ) )
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest

    doctest.testmod()
| 87 |
"""simple docstring"""
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def a__ ( flax_key_tuple , flax_tensor ):
    """Rename a flattened Flax parameter key and adapt its tensor layout for PyTorch.

    Fixed: the original signature repeated the parameter name ``a`` (a
    SyntaxError); names restored from the body, which already read
    ``flax_key_tuple`` / ``flax_tensor``.  The redundant always-true
    ``".".join(...)`` clause on the linear-layer branch was dropped.

    Args:
        flax_key_tuple: tuple of key components, e.g. ``("mlp", "wi", "kernel")``.
        flax_tensor: the parameter tensor (torch.Tensor).

    Returns:
        ``(new_key_tuple, new_tensor)`` with ``kernel``/``scale``/``embedding``
        renamed to ``weight`` and kernels transposed to PyTorch layout.
    """
    if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
        # expert layer: (experts, in, out) -> (experts, out, in)
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = torch.permute(flax_tensor , (0, 2, 1) )
    elif flax_key_tuple[-1] == "kernel":
        # linear layer: transpose in/out dimensions
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = flax_tensor.T
    elif flax_key_tuple[-1] in ["scale", "embedding"]:
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
    return flax_key_tuple, flax_tensor
def a__ ( layer , checkpoint_info , switch_checkpoint_path ):
    """Split a flattened checkpoint key into its real layer name, the remaining
    sub-key, and the value to store for it.

    Fixed: the original signature repeated the parameter name ``a`` (a
    SyntaxError); names restored from the body, which already read ``layer``,
    ``checkpoint_info`` and ``switch_checkpoint_path``.

    Args:
        layer: flattened key, e.g. ``"encoder/.../kvstore/path"``.
        checkpoint_info: flat mapping of keys to raw checkpoint values.
        switch_checkpoint_path: root directory used to absolutize kvstore paths.

    Returns:
        ``(curr_real_layer_name, split_layer, content)`` where ``split_layer[-1]``
        is the tuple sub-key under which *content* should be filed.
    """
    if "metadata" in layer:
        split_layer = layer.split("metadata" )
        curr_real_layer_name = "".join(split_layer[0] )[:-1]
        split_layer = [tuple(("metadata" + split_layer[1]).split("/" ) )]
    elif "kvstore" in layer:
        split_layer = layer.split("kvstore" )
        curr_real_layer_name = "".join(split_layer[0] )[:-1]
        split_layer = [tuple(("kvstore" + split_layer[1]).split("/" ) )]
    else:
        split_layer = layer.split("/" )
        curr_real_layer_name = "/".join(split_layer[:-1] )
        split_layer[-1] = (split_layer[-1],)
    if "kvstore/path" in layer:
        # make the stored file path absolute relative to the checkpoint root
        content = f'{switch_checkpoint_path}/{checkpoint_info[layer]}'
    elif "kvstore/driver" in layer:
        content = "file"
    else:
        content = checkpoint_info[layer]
    return curr_real_layer_name, split_layer, content
def a__ ( current_block , save_path ):
    """Rename a block of weights to Transformers naming and save it with torch.

    Fixed: the original signature repeated the parameter name ``a`` (a
    SyntaxError) and dropped the assignment targets; names restored from the
    body, which already read ``current_block`` / ``new_current_block``.

    NOTE(review): the dict key transformation (``"/"`` -> ``"."``) is
    reconstructed — the obfuscated source lost the key expression; confirm
    against the small conversion script's output naming.
    """
    current_block = rename_keys(current_block )
    new_current_block = {}
    for k, v in current_block.items():
        # flatten "a/b" style keys into PyTorch's dotted "a.b" names
        new_current_block[k.replace("/" , "." )] = v
    current_block = new_current_block
    torch.save(current_block , save_path )
def a__ ( switch_checkpoint_path , dump_path , max_shard_size , dtype , weights_name : str = WEIGHTS_NAME ):
    """Shard a T5X SwitchTransformers checkpoint into PyTorch ``.bin`` shards,
    streaming one weight at a time to bound peak memory.

    Fixed: the original signature repeated the parameter name ``a`` five times
    (a SyntaxError) and left many call arguments as the undefined placeholder
    ``a``; names restored from the surviving reads in the body
    (``switch_checkpoint_path``, ``checkpoint_info``, ``weights_name``, ...).

    Args:
        switch_checkpoint_path: directory holding the original T5X checkpoint.
        dump_path: output directory for the shards and index.
        max_shard_size: e.g. ``"10GB"``; parsed by ``convert_file_size_to_int``.
        dtype: torch dtype name (e.g. ``"bfloat16"``) to cast weights to.
        weights_name: base name of the shard files.

    Returns:
        ``(metadata, index)`` — ``index`` is ``None`` when only one shard is written.
    """
    max_shard_size = convert_file_size_to_int(max_shard_size )
    sharded_state_dicts = []
    current_block = {}
    current_block_size = 0
    total_size = 0
    os.makedirs(dump_path , exist_ok=True )
    with gfile.GFile(switch_checkpoint_path + "/checkpoint" , "rb" ) as fp:
        checkpoint_info = serialization.msgpack_restore(fp.read() )["optimizer"]["target"]
        checkpoint_info = flatten_dict(checkpoint_info , sep="/" )
    all_layers = {}
    for layer in checkpoint_info.keys():
        curr_real_layer_name, split_layer, content = get_key_and_tensorstore_dict(
            layer , checkpoint_info , switch_checkpoint_path )
        if curr_real_layer_name in all_layers:
            all_layers[curr_real_layer_name][split_layer[-1]] = content
        else:
            all_layers[curr_real_layer_name] = {split_layer[-1]: content}
    for key in all_layers.keys():
        # open tensorstore file
        raw_weights = ts.open(unflatten_dict(all_layers[key] ) ).result().read().result()
        raw_weights = torch.tensor(raw_weights )
        weight_size = raw_weights.numel() * dtype_byte_size(raw_weights.dtype )
        # use the renaming pattern from the small conversion scripts
        key, raw_weights = rename_base_flax_keys(tuple(key.split("/" ) ) , raw_weights )
        key = "/".join(key )
        # If this weight is going to tip up over the maximal size, we split.
        if current_block_size + weight_size > max_shard_size:
            save_path = os.path.join(
                dump_path , weights_name.replace(".bin" , f'-{len(sharded_state_dicts )+1:05d}-of-???.bin' ) )
            rename_and_save_block(current_block , save_path )
            sharded_state_dicts.append(current_block.keys() )
            del current_block
            current_block = {}
            current_block_size = 0
        current_block[key] = raw_weights.to(getattr(torch , dtype ) )
        current_block_size += weight_size
        total_size += weight_size
    # Add the last block
    save_path = os.path.join(dump_path , weights_name.replace(".bin" , f'-{len(sharded_state_dicts )+1:05d}-of-???.bin' ) )
    rename_and_save_block(current_block , save_path )
    sharded_state_dicts.append(current_block.keys() )
    # If we only have one shard, we return it
    if len(sharded_state_dicts ) == 1:
        return {weights_name: sharded_state_dicts[0]}, None
    # Otherwise, let's build the index
    weight_map = {}
    shards = {}
    for idx, shard in enumerate(sharded_state_dicts ):
        shard_file = weights_name.replace(
            ".bin" , f'-{idx+1:05d}-of-{len(sharded_state_dicts ):05d}.bin' )
        # the shards were written with a "-of-???" placeholder; rename them now
        # that the final shard count is known
        temp_filename = os.path.join(dump_path , weights_name.replace(".bin" , f'-{idx+1:05d}-of-???.bin' ) )
        os.rename(temp_filename , os.path.join(dump_path , shard_file ) )
        shards[shard_file] = shard
        for key in shard:
            weight_map[key] = shard_file
    # Add the metadata
    metadata = {"total_size": total_size}
    index = {"metadata": metadata, "weight_map": weight_map}
    with open(os.path.join(dump_path , WEIGHTS_INDEX_NAME ) , "w" , encoding="utf-8" ) as f:
        content = json.dumps(index , indent=2 , sort_keys=True ) + "\n"
        f.write(content )
    return metadata, index
if __name__ == "__main__":
    # Fixed: the parser/args results were assigned to the throwaway name ``_a``
    # while the following lines read ``parser`` / ``args``.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--switch_t5x_checkpoint_path""",
        default="""/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600""",
        type=str,
        required=False,
        help="""Path to a directory containing a folder per layer. Follows the original Google format.""",
    )
    parser.add_argument("""--max_shard_size""", default="""10GB""", required=False, help="""Max shard size""")
    parser.add_argument("""--dtype""", default="""bfloat16""", type=str, required=False, help="""dtype of the saved model""")
    parser.add_argument(
        """--pytorch_dump_folder_path""",
        default="""/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted""",
        type=str,
        required=False,
        help="""Path to the output pytorch model.""",
    )
    args = parser.parse_args()
    # Fixed: the flag is --switch_t5x_checkpoint_path, so argparse exposes it
    # as ``switch_t5x_checkpoint_path`` (was the typo ``switch_tax_...``).
    # NOTE(review): ``shard_on_the_fly`` is the sharding function defined
    # above (obfuscated to ``a__``) — verify the name resolves.
    shard_on_the_fly(
        args.switch_t5x_checkpoint_path,
        args.pytorch_dump_folder_path,
        args.max_shard_size,
        args.dtype,
    )
def a__ ( ):
    """Smoke test: save a small SwitchTransformers config, reload the model and
    run one generation on a sentinel-token prompt.

    Fixed: intermediate results were assigned to throwaway obfuscated names but
    read back under their original names (``config``, the prompt text, ``out``),
    which raised NameError.
    """
    from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, TaTokenizer

    config = SwitchTransformersConfig.from_pretrained("google/switch-base-8" )
    config.save_pretrained("/home/arthur_huggingface_co/transformers/switch_converted" )
    model = SwitchTransformersForConditionalGeneration.from_pretrained(
        "/home/arthur_huggingface_co/transformers/switch_converted" , device_map="auto" )
    tokenizer = TaTokenizer.from_pretrained("t5-small" )
    text = "A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>."
    input_ids = tokenizer(text , return_tensors="pt" ).input_ids
    out = model.generate(input_ids , decoder_start_token_id=0 )
    print(tokenizer.decode(out[0] ) )
| 87 | 1 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
# Auto-generated placeholder classes used when Flax is not installed: every
# instantiation or classmethod call raises, via requires_backends, an
# ImportError explaining that the "flax" backend is missing.
class _UpperCAmelCase ( metaclass=_snake_case):
    # backends this placeholder stands in for
    __lowercase : Optional[Any] = ["""flax"""]

    def __init__( self , *snake_case_ , **snake_case_ ):
        requires_backends(self , ["flax"] )

    @classmethod
    def lowerCamelCase__ ( cls , *snake_case_ , **snake_case_ ):
        requires_backends(cls , ["flax"] )

    @classmethod
    def lowerCamelCase__ ( cls , *snake_case_ , **snake_case_ ):
        requires_backends(cls , ["flax"] )


class _UpperCAmelCase ( metaclass=_snake_case):
    __lowercase : List[Any] = ["""flax"""]

    def __init__( self , *snake_case_ , **snake_case_ ):
        requires_backends(self , ["flax"] )

    @classmethod
    def lowerCamelCase__ ( cls , *snake_case_ , **snake_case_ ):
        requires_backends(cls , ["flax"] )

    @classmethod
    def lowerCamelCase__ ( cls , *snake_case_ , **snake_case_ ):
        requires_backends(cls , ["flax"] )


class _UpperCAmelCase ( metaclass=_snake_case):
    __lowercase : Union[str, Any] = ["""flax"""]

    def __init__( self , *snake_case_ , **snake_case_ ):
        requires_backends(self , ["flax"] )

    @classmethod
    def lowerCamelCase__ ( cls , *snake_case_ , **snake_case_ ):
        requires_backends(cls , ["flax"] )

    @classmethod
    def lowerCamelCase__ ( cls , *snake_case_ , **snake_case_ ):
        requires_backends(cls , ["flax"] )


class _UpperCAmelCase ( metaclass=_snake_case):
    __lowercase : Union[str, Any] = ["""flax"""]

    def __init__( self , *snake_case_ , **snake_case_ ):
        requires_backends(self , ["flax"] )

    @classmethod
    def lowerCamelCase__ ( cls , *snake_case_ , **snake_case_ ):
        requires_backends(cls , ["flax"] )

    @classmethod
    def lowerCamelCase__ ( cls , *snake_case_ , **snake_case_ ):
        requires_backends(cls , ["flax"] )


class _UpperCAmelCase ( metaclass=_snake_case):
    __lowercase : Optional[Any] = ["""flax"""]

    def __init__( self , *snake_case_ , **snake_case_ ):
        requires_backends(self , ["flax"] )

    @classmethod
    def lowerCamelCase__ ( cls , *snake_case_ , **snake_case_ ):
        requires_backends(cls , ["flax"] )

    @classmethod
    def lowerCamelCase__ ( cls , *snake_case_ , **snake_case_ ):
        requires_backends(cls , ["flax"] )


class _UpperCAmelCase ( metaclass=_snake_case):
    __lowercase : List[str] = ["""flax"""]

    def __init__( self , *snake_case_ , **snake_case_ ):
        requires_backends(self , ["flax"] )

    @classmethod
    def lowerCamelCase__ ( cls , *snake_case_ , **snake_case_ ):
        requires_backends(cls , ["flax"] )

    @classmethod
    def lowerCamelCase__ ( cls , *snake_case_ , **snake_case_ ):
        requires_backends(cls , ["flax"] )


class _UpperCAmelCase ( metaclass=_snake_case):
    __lowercase : Optional[int] = ["""flax"""]

    def __init__( self , *snake_case_ , **snake_case_ ):
        requires_backends(self , ["flax"] )

    @classmethod
    def lowerCamelCase__ ( cls , *snake_case_ , **snake_case_ ):
        requires_backends(cls , ["flax"] )

    @classmethod
    def lowerCamelCase__ ( cls , *snake_case_ , **snake_case_ ):
        requires_backends(cls , ["flax"] )


class _UpperCAmelCase ( metaclass=_snake_case):
    __lowercase : Dict = ["""flax"""]

    def __init__( self , *snake_case_ , **snake_case_ ):
        requires_backends(self , ["flax"] )

    @classmethod
    def lowerCamelCase__ ( cls , *snake_case_ , **snake_case_ ):
        requires_backends(cls , ["flax"] )

    @classmethod
    def lowerCamelCase__ ( cls , *snake_case_ , **snake_case_ ):
        requires_backends(cls , ["flax"] )


class _UpperCAmelCase ( metaclass=_snake_case):
    __lowercase : Any = ["""flax"""]

    def __init__( self , *snake_case_ , **snake_case_ ):
        requires_backends(self , ["flax"] )

    @classmethod
    def lowerCamelCase__ ( cls , *snake_case_ , **snake_case_ ):
        requires_backends(cls , ["flax"] )

    @classmethod
    def lowerCamelCase__ ( cls , *snake_case_ , **snake_case_ ):
        requires_backends(cls , ["flax"] )


class _UpperCAmelCase ( metaclass=_snake_case):
    __lowercase : List[str] = ["""flax"""]

    def __init__( self , *snake_case_ , **snake_case_ ):
        requires_backends(self , ["flax"] )

    @classmethod
    def lowerCamelCase__ ( cls , *snake_case_ , **snake_case_ ):
        requires_backends(cls , ["flax"] )

    @classmethod
    def lowerCamelCase__ ( cls , *snake_case_ , **snake_case_ ):
        requires_backends(cls , ["flax"] )


class _UpperCAmelCase ( metaclass=_snake_case):
    __lowercase : List[str] = ["""flax"""]

    def __init__( self , *snake_case_ , **snake_case_ ):
        requires_backends(self , ["flax"] )

    @classmethod
    def lowerCamelCase__ ( cls , *snake_case_ , **snake_case_ ):
        requires_backends(cls , ["flax"] )

    @classmethod
    def lowerCamelCase__ ( cls , *snake_case_ , **snake_case_ ):
        requires_backends(cls , ["flax"] )


class _UpperCAmelCase ( metaclass=_snake_case):
    __lowercase : int = ["""flax"""]

    def __init__( self , *snake_case_ , **snake_case_ ):
        requires_backends(self , ["flax"] )

    @classmethod
    def lowerCamelCase__ ( cls , *snake_case_ , **snake_case_ ):
        requires_backends(cls , ["flax"] )

    @classmethod
    def lowerCamelCase__ ( cls , *snake_case_ , **snake_case_ ):
        requires_backends(cls , ["flax"] )


class _UpperCAmelCase ( metaclass=_snake_case):
    __lowercase : Dict = ["""flax"""]

    def __init__( self , *snake_case_ , **snake_case_ ):
        requires_backends(self , ["flax"] )

    @classmethod
    def lowerCamelCase__ ( cls , *snake_case_ , **snake_case_ ):
        requires_backends(cls , ["flax"] )

    @classmethod
    def lowerCamelCase__ ( cls , *snake_case_ , **snake_case_ ):
        requires_backends(cls , ["flax"] )
| 87 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import MobileBertConfig, is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_MODEL_FOR_PRETRAINING_MAPPING,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertModel,
)
@require_tf
class _UpperCAmelCase ( _snake_case , _snake_case , unittest.TestCase):
    """Common-API test harness for the TensorFlow MobileBERT model family."""

    # Every TF MobileBERT head exercised by the shared model tests.
    __lowercase : Dict = (
        (
            TFMobileBertModel,
            TFMobileBertForMaskedLM,
            TFMobileBertForNextSentencePrediction,
            TFMobileBertForPreTraining,
            TFMobileBertForQuestionAnswering,
            TFMobileBertForSequenceClassification,
            TFMobileBertForTokenClassification,
            TFMobileBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    # Pipeline-task name -> model class used by the pipeline tests.
    __lowercase : Optional[Any] = (
        {
            """feature-extraction""": TFMobileBertModel,
            """fill-mask""": TFMobileBertForMaskedLM,
            """question-answering""": TFMobileBertForQuestionAnswering,
            """text-classification""": TFMobileBertForSequenceClassification,
            """token-classification""": TFMobileBertForTokenClassification,
            """zero-shot""": TFMobileBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    __lowercase : Union[str, Any] = False
    __lowercase : Optional[int] = False

    def _prepare_for_class( self , inputs_dict , model_class , return_labels=False ):
        """Extend the mixin's input preparation for pretraining heads.

        Fixed: the original signature reused one obfuscated parameter name
        three times (a SyntaxError); names restored from the ``super()`` call,
        and the method renamed to match the hook it overrides.  ``tf.intaa``
        restored to ``tf.int32`` per the file's digit-obfuscation pattern.

        NOTE(review): the ``"next_sentence_label"`` key is reconstructed — the
        obfuscated source lost the assignment target; confirm against the
        pretraining head's expected label inputs.
        """
        inputs_dict = super()._prepare_for_class(inputs_dict , model_class , return_labels=return_labels )
        if return_labels:
            if model_class in get_values(TF_MODEL_FOR_PRETRAINING_MAPPING ):
                inputs_dict["next_sentence_label"] = tf.zeros(self.model_tester.batch_size , dtype=tf.int32 )
        return inputs_dict
class _UpperCAmelCase ( _snake_case):
    """Builds small random MobileBERT configs/inputs and runs per-head shape
    checks for the TF MobileBERT model family.

    Fixed: every method shared one obfuscated name (so later definitions
    silently shadowed earlier ones) and the ``create_and_check_*`` signatures
    repeated a single parameter name seven times (a SyntaxError).  Method and
    parameter names were restored from their call sites in the surrounding
    test methods and from the surviving reads in each body.
    """

    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , embedding_size=32 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_12 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.embedding_size = embedding_size

    def prepare_config_and_inputs( self ):
        """Build a small random config plus ids/masks/labels for one batch."""
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = MobileBertConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , embedding_size=self.embedding_size , )
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_mobilebert_model( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = TFMobileBertModel(config=config )
        # dict, list and bare-tensor calling conventions must all work
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs )
        inputs = [input_ids, input_mask]
        result = model(inputs )
        result = model(input_ids )
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )

    def create_and_check_mobilebert_for_masked_lm( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = TFMobileBertForMaskedLM(config=config )
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

    def create_and_check_mobilebert_for_next_sequence_prediction( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = TFMobileBertForNextSentencePrediction(config=config )
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )

    def create_and_check_mobilebert_for_pretraining( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = TFMobileBertForPreTraining(config=config )
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs )
        self.parent.assertEqual(
            result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
        self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )

    def create_and_check_mobilebert_for_sequence_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_labels = self.num_labels
        model = TFMobileBertForSequenceClassification(config=config )
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def create_and_check_mobilebert_for_multiple_choice( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_choices = self.num_choices
        model = TFMobileBertForMultipleChoice(config=config )
        # replicate every input once per answer choice
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids , 1 ) , (1, self.num_choices, 1) )
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask , 1 ) , (1, self.num_choices, 1) )
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids , 1 ) , (1, self.num_choices, 1) )
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )

    def create_and_check_mobilebert_for_token_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_labels = self.num_labels
        model = TFMobileBertForTokenClassification(config=config )
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )

    def create_and_check_mobilebert_for_question_answering( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = TFMobileBertForQuestionAnswering(config=config )
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )

    def prepare_config_and_inputs_for_common( self ):
        # NOTE(review): name chosen to match the hook the TF model-tester mixin
        # consumes — confirm against the mixin's expected interface.
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
# NOTE(review): the methods below read like the per-task entry points of the
# enclosing TF MobileBERT test class (setUp, config test, one test per head,
# and a slow from_pretrained smoke test).  ``TFMobileBertModelTest`` /
# ``TFMobileBertModelTester`` are referenced but not defined under those names
# in this file, and the setUp results are assigned to ``_snake_case`` while
# later methods read ``self.model_tester`` / ``self.config_tester`` — verify
# the intended attribute wiring.
def lowerCamelCase__ ( self ):
    # setUp: build the shared model tester and the config tester.
    _snake_case : int = TFMobileBertModelTest.TFMobileBertModelTester(self )
    _snake_case : Optional[Any] = ConfigTester(self , config_class=snake_case_ , hidden_size=37 )

def lowerCamelCase__ ( self ):
    # Run the generic MobileBertConfig sanity checks.
    self.config_tester.run_common_tests()

def lowerCamelCase__ ( self ):
    _snake_case : int = self.model_tester.prepare_config_and_inputs()
    self.model_tester.create_and_check_mobilebert_model(*snake_case_ )

def lowerCamelCase__ ( self ):
    _snake_case : List[Any] = self.model_tester.prepare_config_and_inputs()
    self.model_tester.create_and_check_mobilebert_for_masked_lm(*snake_case_ )

def lowerCamelCase__ ( self ):
    _snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs()
    self.model_tester.create_and_check_mobilebert_for_multiple_choice(*snake_case_ )

def lowerCamelCase__ ( self ):
    _snake_case : Tuple = self.model_tester.prepare_config_and_inputs()
    self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*snake_case_ )

def lowerCamelCase__ ( self ):
    _snake_case : str = self.model_tester.prepare_config_and_inputs()
    self.model_tester.create_and_check_mobilebert_for_pretraining(*snake_case_ )

def lowerCamelCase__ ( self ):
    _snake_case : Dict = self.model_tester.prepare_config_and_inputs()
    self.model_tester.create_and_check_mobilebert_for_question_answering(*snake_case_ )

def lowerCamelCase__ ( self ):
    _snake_case : Any = self.model_tester.prepare_config_and_inputs()
    self.model_tester.create_and_check_mobilebert_for_sequence_classification(*snake_case_ )

def lowerCamelCase__ ( self ):
    _snake_case : Any = self.model_tester.prepare_config_and_inputs()
    self.model_tester.create_and_check_mobilebert_for_token_classification(*snake_case_ )

@slow
def lowerCamelCase__ ( self ):
    # Smoke test that the public checkpoint loads.
    # for model_name in TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
    for model_name in ["google/mobilebert-uncased"]:
        _snake_case : str = TFMobileBertModel.from_pretrained(snake_case_ )
        self.assertIsNotNone(snake_case_ )
@require_tf
class _UpperCAmelCase ( unittest.TestCase):
    """Integration test pinning TFMobileBertForPreTraining logits on a fixed input.

    NOTE(review): intermediate results are assigned to the obfuscated name
    ``_snake_case`` but later read as ``output`` / ``snake_case_`` — verify the
    intended variable wiring.
    """

    @slow
    def lowerCamelCase__ ( self ):
        _snake_case : Any = TFMobileBertForPreTraining.from_pretrained("google/mobilebert-uncased" )
        _snake_case : str = tf.constant([[0, 1, 2, 3, 4, 5]] )
        _snake_case : Union[str, Any] = model(snake_case_ )[0]
        # expected logits shape: (batch, seq_len, vocab_size)
        _snake_case : int = [1, 6, 3_05_22]
        self.assertEqual(output.shape , snake_case_ )
        # reference logits for the first 3 positions x 3 vocab entries
        _snake_case : Optional[Any] = tf.constant(
            [
                [
                    [-4.5919547, -9.248295, -9.645256],
                    [-6.7306175, -6.440284, -6.6052837],
                    [-7.2743506, -6.7847915, -6.024673],
                ]
            ] )
        tf.debugging.assert_near(output[:, :3, :3] , snake_case_ , atol=1E-4 )
| 87 | 1 |
"""simple docstring"""
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class _UpperCAmelCase ( unittest.TestCase):
def lowerCamelCase__ ( self ):
    # setUp: reset a per-test counter/placeholder (original name lost in obfuscation).
    _snake_case : Optional[int] = 0
def lowerCamelCase__ ( self ):
    """Loading a CLIP checkpoint by hub id should yield a CLIPImageProcessor.

    NOTE(review): the assertIsInstance arguments are obfuscated
    (``snake_case_``) — presumably (loaded processor, CLIPImageProcessor);
    verify the intended names.
    """
    _snake_case : str = AutoImageProcessor.from_pretrained("openai/clip-vit-base-patch32" )
    self.assertIsInstance(snake_case_ , snake_case_ )
def lowerCamelCase__ ( self ):
    """A local dir with preprocessor_config.json + config.json should load.

    NOTE(review): the Path/json results are assigned to ``_snake_case`` and the
    directory name is read as ``snake_case_`` — presumably ``tmpdirname``;
    verify the intended wiring.
    """
    with tempfile.TemporaryDirectory() as tmpdirname:
        _snake_case : Dict = Path(snake_case_ ) / "preprocessor_config.json"
        _snake_case : List[str] = Path(snake_case_ ) / "config.json"
        json.dump(
            {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"} , open(snake_case_ , "w" ) , )
        json.dump({"model_type": "clip"} , open(snake_case_ , "w" ) )
        _snake_case : Optional[Any] = AutoImageProcessor.from_pretrained(snake_case_ )
        self.assertIsInstance(snake_case_ , snake_case_ )
def lowerCamelCase__ ( self ):
    """Loading should also work from a legacy feature-extractor config.

    NOTE(review): same obfuscated-name wiring caveat as the sibling test —
    ``snake_case_`` is presumably ``tmpdirname`` / the written config paths.
    """
    # Ensure we can load the image processor from the feature extractor config
    with tempfile.TemporaryDirectory() as tmpdirname:
        _snake_case : str = Path(snake_case_ ) / "preprocessor_config.json"
        _snake_case : Dict = Path(snake_case_ ) / "config.json"
        json.dump(
            {"feature_extractor_type": "CLIPFeatureExtractor", "processor_class": "CLIPProcessor"} , open(snake_case_ , "w" ) , )
        json.dump({"model_type": "clip"} , open(snake_case_ , "w" ) )
        _snake_case : Dict = AutoImageProcessor.from_pretrained(snake_case_ )
        self.assertIsInstance(snake_case_ , snake_case_ )
def lowerCamelCase__ ( self ):
with tempfile.TemporaryDirectory() as tmpdirname:
_snake_case : Tuple = CLIPConfig()
# Create a dummy config file with image_proceesor_type
_snake_case : Any = Path(snake_case_ ) / "preprocessor_config.json"
_snake_case : Dict = Path(snake_case_ ) / "config.json"
json.dump(
{"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"} , open(snake_case_ , "w" ) , )
json.dump({"model_type": "clip"} , open(snake_case_ , "w" ) )
# remove image_processor_type to make sure config.json alone is enough to load image processor locally
_snake_case : int = AutoImageProcessor.from_pretrained(snake_case_ ).to_dict()
config_dict.pop("image_processor_type" )
_snake_case : Dict = CLIPImageProcessor(**snake_case_ )
# save in new folder
model_config.save_pretrained(snake_case_ )
config.save_pretrained(snake_case_ )
_snake_case : List[Any] = AutoImageProcessor.from_pretrained(snake_case_ )
# make sure private variable is not incorrectly saved
_snake_case : Union[str, Any] = json.loads(config.to_json_string() )
self.assertTrue("_processor_class" not in dict_as_saved )
self.assertIsInstance(snake_case_ , snake_case_ )
def lowerCamelCase__ ( self ):
with tempfile.TemporaryDirectory() as tmpdirname:
_snake_case : Tuple = Path(snake_case_ ) / "preprocessor_config.json"
json.dump(
{"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"} , open(snake_case_ , "w" ) , )
_snake_case : Optional[Any] = AutoImageProcessor.from_pretrained(snake_case_ )
self.assertIsInstance(snake_case_ , snake_case_ )
def lowerCamelCase__ ( self ):
with self.assertRaisesRegex(
snake_case_ , "clip-base is not a local folder and is not a valid model identifier" ):
_snake_case : Optional[Any] = AutoImageProcessor.from_pretrained("clip-base" )
def lowerCamelCase__ ( self ):
with self.assertRaisesRegex(
snake_case_ , r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ):
_snake_case : List[str] = AutoImageProcessor.from_pretrained(snake_case_ , revision="aaaaaa" )
def lowerCamelCase__ ( self ):
with self.assertRaisesRegex(
snake_case_ , "hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json." , ):
_snake_case : str = AutoImageProcessor.from_pretrained("hf-internal-testing/config-no-model" )
def lowerCamelCase__ ( self ):
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(snake_case_ ):
_snake_case : str = AutoImageProcessor.from_pretrained("hf-internal-testing/test_dynamic_image_processor" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(snake_case_ ):
_snake_case : Union[str, Any] = AutoImageProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_image_processor" , trust_remote_code=snake_case_ )
_snake_case : Dict = AutoImageProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_image_processor" , trust_remote_code=snake_case_ )
self.assertEqual(image_processor.__class__.__name__ , "NewImageProcessor" )
# Test image processor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(snake_case_ )
_snake_case : Optional[Any] = AutoImageProcessor.from_pretrained(snake_case_ , trust_remote_code=snake_case_ )
self.assertEqual(reloaded_image_processor.__class__.__name__ , "NewImageProcessor" )
def lowerCamelCase__ ( self ):
try:
AutoConfig.register("custom" , snake_case_ )
AutoImageProcessor.register(snake_case_ , snake_case_ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(snake_case_ ):
AutoImageProcessor.register(snake_case_ , snake_case_ )
with tempfile.TemporaryDirectory() as tmpdirname:
_snake_case : Optional[Any] = Path(snake_case_ ) / "preprocessor_config.json"
_snake_case : Dict = Path(snake_case_ ) / "config.json"
json.dump(
{"feature_extractor_type": "CLIPFeatureExtractor", "processor_class": "CLIPProcessor"} , open(snake_case_ , "w" ) , )
json.dump({"model_type": "clip"} , open(snake_case_ , "w" ) )
_snake_case : Tuple = CustomImageProcessor.from_pretrained(snake_case_ )
# Now that the config is registered, it can be used as any other config with the auto-API
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(snake_case_ )
_snake_case : Optional[Any] = AutoImageProcessor.from_pretrained(snake_case_ )
self.assertIsInstance(snake_case_ , snake_case_ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
def lowerCamelCase__ ( self ):
class _UpperCAmelCase ( _snake_case):
__lowercase : str = True
try:
AutoConfig.register("custom" , snake_case_ )
AutoImageProcessor.register(snake_case_ , snake_case_ )
# If remote code is not set, the default is to use local
_snake_case : Union[str, Any] = AutoImageProcessor.from_pretrained("hf-internal-testing/test_dynamic_image_processor" )
self.assertEqual(image_processor.__class__.__name__ , "NewImageProcessor" )
self.assertTrue(image_processor.is_local )
# If remote code is disabled, we load the local one.
_snake_case : Union[str, Any] = AutoImageProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_image_processor" , trust_remote_code=snake_case_ )
self.assertEqual(image_processor.__class__.__name__ , "NewImageProcessor" )
self.assertTrue(image_processor.is_local )
# If remote is enabled, we load from the Hub
_snake_case : Dict = AutoImageProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_image_processor" , trust_remote_code=snake_case_ )
self.assertEqual(image_processor.__class__.__name__ , "NewImageProcessor" )
self.assertTrue(not hasattr(snake_case_ , "is_local" ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
| 87 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_a : List[Any] = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : List[Any] = ["""BartphoTokenizer"""]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bartpho import BartphoTokenizer
else:
import sys
_a : Optional[int] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 87 | 1 |
"""simple docstring"""
def a__ ( a : int , a : int ):
"""simple docstring"""
return int((input_a, input_a).count(1 ) != 0 )
def a__ ( ):
"""simple docstring"""
assert or_gate(0 , 0 ) == 0
assert or_gate(0 , 1 ) == 1
assert or_gate(1 , 0 ) == 1
assert or_gate(1 , 1 ) == 1
if __name__ == "__main__":
print(or_gate(0, 1))
print(or_gate(1, 0))
print(or_gate(0, 0))
print(or_gate(1, 1))
| 87 |
"""simple docstring"""
def a__ ( a : list , a : int , a : int = 0 , a : int = 0 ):
"""simple docstring"""
_snake_case : Optional[int] = right or len(a ) - 1
if left > right:
return -1
elif list_data[left] == key:
return left
elif list_data[right] == key:
return right
else:
return search(a , a , left + 1 , right - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 87 | 1 |
"""simple docstring"""
from __future__ import annotations
_a : Union[str, Any] = tuple[int, int, int]
_a : Any = tuple[str, str, str]
# used alphabet --------------------------
# from string.ascii_uppercase
_a : str = """ABCDEFGHIJKLMNOPQRSTUVWXYZ"""
# -------------------------- default selection --------------------------
# rotors --------------------------
_a : Any = """EGZWVONAHDCLFQMSIPJBYUKXTR"""
_a : Dict = """FOBHMDKEXQNRAULPGSJVTYICZW"""
_a : Union[str, Any] = """ZJXESIUQLHAVRMDOYGTNFWPBKC"""
# reflector --------------------------
_a : List[Any] = {
"""A""": """N""",
"""N""": """A""",
"""B""": """O""",
"""O""": """B""",
"""C""": """P""",
"""P""": """C""",
"""D""": """Q""",
"""Q""": """D""",
"""E""": """R""",
"""R""": """E""",
"""F""": """S""",
"""S""": """F""",
"""G""": """T""",
"""T""": """G""",
"""H""": """U""",
"""U""": """H""",
"""I""": """V""",
"""V""": """I""",
"""J""": """W""",
"""W""": """J""",
"""K""": """X""",
"""X""": """K""",
"""L""": """Y""",
"""Y""": """L""",
"""M""": """Z""",
"""Z""": """M""",
}
# -------------------------- extra rotors --------------------------
_a : int = """RMDJXFUWGISLHVTCQNKYPBEZOA"""
_a : int = """SGLCPQWZHKXAREONTFBVIYJUDM"""
_a : Optional[Any] = """HVSICLTYKQUBXDWAJZOMFGPREN"""
_a : Dict = """RZWQHFMVDBKICJLNTUXAGYPSOE"""
_a : Any = """LFKIJODBEGAMQPXVUHYSTCZRWN"""
_a : Optional[Any] = """KOAEGVDHXPQZMLFTYWJNBRCIUS"""
def a__ ( a : RotorPositionT , a : RotorSelectionT , a : str ):
"""simple docstring"""
if (unique_rotsel := len(set(a ) )) < 3:
_snake_case : Optional[int] = f'Please use 3 unique rotors (not {unique_rotsel})'
raise Exception(a )
# Checks if rotor positions are valid
_snake_case , _snake_case , _snake_case : Any = rotpos
if not 0 < rotorposa <= len(a ):
_snake_case : Tuple = f'First rotor position is not within range of 1..26 ({rotorposa}'
raise ValueError(a )
if not 0 < rotorposa <= len(a ):
_snake_case : Union[str, Any] = f'Second rotor position is not within range of 1..26 ({rotorposa})'
raise ValueError(a )
if not 0 < rotorposa <= len(a ):
_snake_case : List[str] = f'Third rotor position is not within range of 1..26 ({rotorposa})'
raise ValueError(a )
# Validates string and returns dict
_snake_case : Tuple = _plugboard(a )
return rotpos, rotsel, pbdict
def a__ ( a : str ):
"""simple docstring"""
if not isinstance(a , a ):
_snake_case : Optional[int] = f'Plugboard setting isn\'t type string ({type(a )})'
raise TypeError(a )
elif len(a ) % 2 != 0:
_snake_case : Any = f'Odd number of symbols ({len(a )})'
raise Exception(a )
elif pbstring == "":
return {}
pbstring.replace(" " , "" )
# Checks if all characters are unique
_snake_case : List[str] = set()
for i in pbstring:
if i not in abc:
_snake_case : str = f'\'{i}\' not in list of symbols'
raise Exception(a )
elif i in tmppbl:
_snake_case : str = f'Duplicate symbol ({i})'
raise Exception(a )
else:
tmppbl.add(a )
del tmppbl
# Created the dictionary
_snake_case : List[str] = {}
for j in range(0 , len(a ) - 1 , 2 ):
_snake_case : Union[str, Any] = pbstring[j + 1]
_snake_case : List[str] = pbstring[j]
return pb
def a__ ( a : str , a : RotorPositionT , a : RotorSelectionT = (rotora, rotora, rotora) , a : str = "" , ):
"""simple docstring"""
_snake_case : Union[str, Any] = text.upper()
_snake_case , _snake_case , _snake_case : Dict = _validator(
a , a , plugb.upper() )
_snake_case , _snake_case , _snake_case : Union[str, Any] = rotor_position
_snake_case , _snake_case , _snake_case : int = rotor_selection
rotorposa -= 1
rotorposa -= 1
rotorposa -= 1
_snake_case : Any = []
# encryption/decryption process --------------------------
for symbol in text:
if symbol in abc:
# 1st plugboard --------------------------
if symbol in plugboard:
_snake_case : List[Any] = plugboard[symbol]
# rotor ra --------------------------
_snake_case : Optional[int] = abc.index(a ) + rotorposa
_snake_case : int = rotora[index % len(a )]
# rotor rb --------------------------
_snake_case : str = abc.index(a ) + rotorposa
_snake_case : str = rotora[index % len(a )]
# rotor rc --------------------------
_snake_case : Any = abc.index(a ) + rotorposa
_snake_case : List[str] = rotora[index % len(a )]
# reflector --------------------------
# this is the reason you don't need another machine to decipher
_snake_case : List[str] = reflector[symbol]
# 2nd rotors
_snake_case : Tuple = abc[rotora.index(a ) - rotorposa]
_snake_case : int = abc[rotora.index(a ) - rotorposa]
_snake_case : List[str] = abc[rotora.index(a ) - rotorposa]
# 2nd plugboard
if symbol in plugboard:
_snake_case : Any = plugboard[symbol]
# moves/resets rotor positions
rotorposa += 1
if rotorposa >= len(a ):
_snake_case : Optional[int] = 0
rotorposa += 1
if rotorposa >= len(a ):
_snake_case : int = 0
rotorposa += 1
if rotorposa >= len(a ):
_snake_case : List[Any] = 0
# else:
# pass
# Error could be also raised
# raise ValueError(
# 'Invalid symbol('+repr(symbol)+')')
result.append(a )
return "".join(a )
if __name__ == "__main__":
_a : Any = """This is my Python script that emulates the Enigma machine from WWII."""
_a : Optional[Any] = (1, 1, 1)
_a : Any = """pictures"""
_a : Union[str, Any] = (rotora, rotora, rotora)
_a : Optional[Any] = enigma(message, rotor_pos, rotor_sel, pb)
print("""Encrypted message:""", en)
print("""Decrypted message:""", enigma(en, rotor_pos, rotor_sel, pb))
| 87 |
"""simple docstring"""
from __future__ import annotations
class BoyerMooreSearch:
    """Boyer–Moore pattern search using the bad-character heuristic.

    The original was mangled: the class name did not match the call site
    below, all three methods shared one name (so two were lost), and the
    constructor discarded ``textLen``/``patLen`` into throwaway variables.
    """

    def __init__(self, text: str, pattern: str):
        self.text, self.pattern = text, pattern
        self.textLen, self.patLen = len(text), len(pattern)

    def match_in_pattern(self, char: str) -> int:
        """Return the rightmost index of ``char`` in the pattern, or -1."""
        for i in range(self.patLen - 1, -1, -1):
            if char == self.pattern[i]:
                return i
        return -1

    def mismatch_in_text(self, current_pos: int) -> int:
        """Return the text index of the rightmost mismatch when the pattern
        is aligned at ``current_pos``, or -1 on a full match."""
        for i in range(self.patLen - 1, -1, -1):
            if self.pattern[i] != self.text[current_pos + i]:
                return current_pos + i
        return -1

    def bad_character_heuristic(self) -> list:
        """Scan alignments left to right and collect every full-match position."""
        positions = []
        for i in range(self.textLen - self.patLen + 1):
            mismatch_index = self.mismatch_in_text(i)
            if mismatch_index == -1:
                positions.append(i)
            else:
                match_index = self.match_in_pattern(self.text[mismatch_index])
                i = (
                    mismatch_index - match_index
                )  # shifting index lgtm [py/multiple-definition]
        return positions
_a : List[Any] = """ABAABA"""
_a : str = """AB"""
_a : List[Any] = BoyerMooreSearch(text, pattern)
_a : Any = bms.bad_character_heuristic()
if len(positions) == 0:
print("""No match found""")
else:
print("""Pattern found in following positions: """)
print(positions)
| 87 | 1 |
"""simple docstring"""
import argparse
import os
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
TRANSFORMERS_PATH = "src/transformers"
PATH_TO_TASK_GUIDES = "docs/source/en/tasks"
def a__ ( a : List[str] , a : List[str] , a : Dict ):
"""simple docstring"""
with open(a , "r" , encoding="utf-8" , newline="\n" ) as f:
_snake_case : Tuple = f.readlines()
# Find the start prompt.
_snake_case : List[Any] = 0
while not lines[start_index].startswith(a ):
start_index += 1
start_index += 1
_snake_case : Union[str, Any] = start_index
while not lines[end_index].startswith(a ):
end_index += 1
end_index -= 1
while len(lines[start_index] ) <= 1:
start_index += 1
while len(lines[end_index] ) <= 1:
end_index -= 1
end_index += 1
return "".join(lines[start_index:end_index] ), start_index, end_index, lines
# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)

# Map each task-guide doc file to the auto-mapping listing its supported models.
TASK_GUIDE_TO_MODELS = {
    "asr.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
    "audio_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
    "language_modeling.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
    "image_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
    "masked_language_modeling.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
    "multiple_choice.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
    "object_detection.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
    "question_answering.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
    "semantic_segmentation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
    "sequence_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
    "summarization.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
    "token_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
    "translation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
    "video_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
    "document_question_answering.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
    "monocular_depth_estimation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}

# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
SPECIAL_TASK_GUIDE_TO_MODEL_TYPES = {
    "summarization.md": ("nllb",),
    "translation.md": ("nllb",),
}
def get_model_list_for_task(task_guide):
    """Return the markdown bullet list of models supported by *task_guide*,
    built from the auto-mapping plus any special-cased model types."""
    model_maping_names = TASK_GUIDE_TO_MODELS[task_guide]
    special_model_types = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(task_guide, set())
    model_names = {
        code: name
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if (code in model_maping_names or code in special_model_types)
    }
    # One markdown link per model, comma-separated, trailing newline expected
    # by the doc template.
    return ", ".join([f"[{name}](../model_doc/{code})" for code, name in model_names.items()]) + "\n"
def check_model_list_for_task(task_guide, overwrite=False):
    """Check (or, with *overwrite*, regenerate) the auto-generated model list
    embedded between the fix-copies markers of *task_guide*'s doc file."""
    current_list, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_TASK_GUIDES, task_guide), start_prompt="<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->", end_prompt="<!--End of the generated tip-->", )
    new_list = get_model_list_for_task(task_guide)

    if current_list != new_list:
        if overwrite:
            with open(os.path.join(PATH_TO_TASK_GUIDES, task_guide), "w", encoding="utf-8", newline="\n") as f:
                f.writelines(lines[:start_index] + [new_list] + lines[end_index:])
        else:
            raise ValueError(
                f"The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`"
                " to fix this." )
if __name__ == "__main__":
_a : str = argparse.ArgumentParser()
parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""")
_a : Any = parser.parse_args()
for task_guide in TASK_GUIDE_TO_MODELS.keys():
check_model_list_for_task(task_guide, args.fix_and_overwrite)
| 87 |
"""simple docstring"""
from datetime import datetime
import requests
from bsa import BeautifulSoup
if __name__ == "__main__":
_a : Dict = input("""Enter image url: """).strip()
print(f'Downloading image from {url} ...')
_a : str = BeautifulSoup(requests.get(url).content, """html.parser""")
# The image URL is in the content field of the first meta tag with property og:image
_a : str = soup.find("""meta""", {"""property""": """og:image"""})["""content"""]
_a : Dict = requests.get(image_url).content
_a : str = f'{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg'
with open(file_name, """wb""") as fp:
fp.write(image_data)
print(f'Done. Image saved to disk as {file_name}.')
| 87 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
if is_sentencepiece_available():
from ..ta.tokenization_ta import TaTokenizer
else:
from ...utils.dummy_sentencepiece_objects import TaTokenizer
_a : List[str] = TaTokenizer
if is_tokenizers_available():
from ..ta.tokenization_ta_fast import TaTokenizerFast
else:
from ...utils.dummy_tokenizers_objects import TaTokenizerFast
_a : Optional[Any] = TaTokenizerFast
_a : Union[str, Any] = {"""configuration_mt5""": ["""MT5Config""", """MT5OnnxConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : Optional[Any] = [
"""MT5EncoderModel""",
"""MT5ForConditionalGeneration""",
"""MT5ForQuestionAnswering""",
"""MT5Model""",
"""MT5PreTrainedModel""",
"""MT5Stack""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : Optional[Any] = ["""TFMT5EncoderModel""", """TFMT5ForConditionalGeneration""", """TFMT5Model"""]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : List[str] = ["""FlaxMT5EncoderModel""", """FlaxMT5ForConditionalGeneration""", """FlaxMT5Model"""]
if TYPE_CHECKING:
from .configuration_mta import MTaConfig, MTaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mta import (
MTaEncoderModel,
MTaForConditionalGeneration,
MTaForQuestionAnswering,
MTaModel,
MTaPreTrainedModel,
MTaStack,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mta import TFMTaEncoderModel, TFMTaForConditionalGeneration, TFMTaModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mta import FlaxMTaEncoderModel, FlaxMTaForConditionalGeneration, FlaxMTaModel
else:
import sys
_a : List[str] = _LazyModule(
__name__,
globals()["""__file__"""],
_import_structure,
extra_objects={"""MT5Tokenizer""": MTaTokenizer, """MT5TokenizerFast""": MTaTokenizerFast},
module_spec=__spec__,
)
| 87 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_a : Optional[int] = {
"""configuration_pix2struct""": [
"""PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""Pix2StructConfig""",
"""Pix2StructTextConfig""",
"""Pix2StructVisionConfig""",
],
"""processing_pix2struct""": ["""Pix2StructProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : List[Any] = ["""Pix2StructImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : Dict = [
"""PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Pix2StructPreTrainedModel""",
"""Pix2StructForConditionalGeneration""",
"""Pix2StructVisionModel""",
"""Pix2StructTextModel""",
]
if TYPE_CHECKING:
from .configuration_pixastruct import (
PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP,
PixaStructConfig,
PixaStructTextConfig,
PixaStructVisionConfig,
)
from .processing_pixastruct import PixaStructProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_pixastruct import PixaStructImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pixastruct import (
PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST,
PixaStructForConditionalGeneration,
PixaStructPreTrainedModel,
PixaStructTextModel,
PixaStructVisionModel,
)
else:
import sys
_a : Optional[int] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 87 | 1 |
"""simple docstring"""
import argparse
import json
import os
import time
import zipfile
from get_ci_error_statistics import download_artifact, get_artifacts_links
from transformers import logging
# Module logger — the functions below reference `logger`, which the original
# never bound (it was assigned to a throwaway name).
logger = logging.get_logger(__name__)
def extract_warnings_from_single_artifact(artifact_path, targets):
    """Extract warnings from one downloaded artifact.

    *artifact_path* is a directory (GitHub-Actions mode) or a ``.zip`` file;
    only its ``warnings.txt`` is parsed. A warning block is kept when any
    target appears as ``": <target>: "`` inside it.

    NOTE(review): reads the module-level global ``from_gh`` — confirm it is
    set before calling (the __main__ block below sets it).
    """
    selected_warnings = set()
    buffer = []

    def parse_line(fp):
        # Accumulate indented continuation lines into `buffer`; a
        # non-indented line terminates the current warning block.
        for line in fp:
            if isinstance(line, bytes):
                line = line.decode("UTF-8")
            if "warnings summary (final)" in line:
                continue
            # This means we are outside the body of a warning
            elif not line.startswith(" "):
                # process a single warning and move it to `selected_warnings`.
                if len(buffer) > 0:
                    warning = "\n".join(buffer)
                    # Only keep the warnings specified in `targets`
                    if any(f": {x}: " in warning for x in targets):
                        selected_warnings.add(warning)
                    buffer.clear()
                continue
            else:
                line = line.strip()
                buffer.append(line)

    if from_gh:
        for filename in os.listdir(artifact_path):
            file_path = os.path.join(artifact_path, filename)
            if not os.path.isdir(file_path):
                # read the file
                if filename != "warnings.txt":
                    continue
                with open(file_path) as fp:
                    parse_line(fp)
    else:
        try:
            with zipfile.ZipFile(artifact_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        if filename != "warnings.txt":
                            continue
                        with z.open(filename) as fp:
                            parse_line(fp)
        except Exception:
            logger.warning(
                f"{artifact_path} is either an invalid zip file or something else wrong. This file is skipped."
            )

    return selected_warnings
def extract_warnings(artifact_dir, targets):
    """Extract the union of target warnings from all artifacts in *artifact_dir*.

    NOTE(review): also reads the module-level global ``from_gh`` (when true,
    every entry is scanned, not just ``.zip`` files).
    """
    selected_warnings = set()
    paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if (p.endswith(".zip") or from_gh)]
    for p in paths:
        selected_warnings.update(extract_warnings_from_single_artifact(p, targets))
    return selected_warnings
if __name__ == "__main__":
def a__ ( a : List[Any] ):
"""simple docstring"""
return values.split("," )
_a : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""--workflow_run_id""", type=str, required=True, help="""A GitHub Actions workflow run id.""")
parser.add_argument(
"""--output_dir""",
type=str,
required=True,
help="""Where to store the downloaded artifacts and other result files.""",
)
parser.add_argument("""--token""", default=None, type=str, help="""A token that has actions:read permission.""")
# optional parameters
parser.add_argument(
"""--targets""",
default="""DeprecationWarning,UserWarning,FutureWarning""",
type=list_str,
help="""Comma-separated list of target warning(s) which we want to extract.""",
)
parser.add_argument(
"""--from_gh""",
action="""store_true""",
help="""If running from a GitHub action workflow and collecting warnings from its artifacts.""",
)
_a : int = parser.parse_args()
_a : Optional[int] = args.from_gh
if from_gh:
# The artifacts have to be downloaded using `actions/download-artifact@v3`
pass
else:
os.makedirs(args.output_dir, exist_ok=True)
# get download links
_a : int = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, """artifacts.json"""), """w""", encoding="""UTF-8""") as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
# download artifacts
for idx, (name, url) in enumerate(artifacts.items()):
print(name)
print(url)
print("""=""" * 80)
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
# extract warnings from artifacts
_a : Any = extract_warnings(args.output_dir, args.targets)
_a : str = sorted(selected_warnings)
with open(os.path.join(args.output_dir, """selected_warnings.json"""), """w""", encoding="""UTF-8""") as fp:
json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
| 87 |
"""simple docstring"""
import argparse
import json
import subprocess
def get_runner_status(target_runners, token):
    """Check GitHub Actions self-hosted runners and report offline ones.

    Queries the runners API with *token*, filters to *target_runners*,
    writes the offline list to ``offline_runners.txt`` (for Slack reporting)
    and raises ValueError if any runner is offline.
    """
    offline_runners = []
    cmd = (
        f'curl -H "Accept: application/vnd.github+json" -H "Authorization: Bearer {token}"'
        " https://api.github.com/repos/huggingface/transformers/actions/runners"
    )
    # shell=True so the quoted curl command line is interpreted as-is
    # (the original passed an undefined name here).
    output = subprocess.run(cmd, shell=True, stdout=subprocess.PIPE)
    o = output.stdout.decode("utf-8")
    status = json.loads(o)

    runners = status["runners"]
    for runner in runners:
        if runner["name"] in target_runners:
            if runner["status"] == "offline":
                offline_runners.append(runner)

    # save the result so we can report them on Slack
    with open("offline_runners.txt", "w") as fp:
        fp.write(json.dumps(offline_runners))

    if len(offline_runners) > 0:
        failed = "\n".join([x["name"] for x in offline_runners])
        raise ValueError(f"The following runners are offline:\n{failed}")
if __name__ == "__main__":
def a__ ( a : Optional[int] ):
"""simple docstring"""
return values.split("," )
_a : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--target_runners""",
default=None,
type=list_str,
required=True,
help="""Comma-separated list of runners to check status.""",
)
parser.add_argument(
"""--token""", default=None, type=str, required=True, help="""A token that has actions:read permission."""
)
_a : List[str] = parser.parse_args()
get_runner_status(args.target_runners, args.token)
| 87 | 1 |
"""simple docstring"""
import argparse
from transformers import BigBirdConfig, BigBirdForPreTraining, BigBirdForQuestionAnswering, load_tf_weights_in_big_bird
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, big_bird_config_file, pytorch_dump_path, is_trivia_qa):
    """Build a BigBird model from *big_bird_config_file*, load the TF
    checkpoint weights into it, and save the PyTorch model."""
    # Initialise PyTorch model (QA head variant for TriviaQA checkpoints).
    config = BigBirdConfig.from_json_file(big_bird_config_file)
    print(f"Building PyTorch model from configuration: {config}")

    if is_trivia_qa:
        model = BigBirdForQuestionAnswering(config)
    else:
        model = BigBirdForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_big_bird(model, tf_checkpoint_path, is_trivia_qa=is_trivia_qa)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
_a : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--big_bird_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained BERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--is_trivia_qa""", action="""store_true""", help="""Whether to convert a model with a trivia_qa head."""
)
_a : List[Any] = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.big_bird_config_file, args.pytorch_dump_path, args.is_trivia_qa
)
| 87 |
"""simple docstring"""
import unittest
from .lib import (
Matrix,
Vector,
axpy,
square_zero_matrix,
unit_basis_vector,
zero_vector,
)
class _UpperCAmelCase(unittest.TestCase):
    """Unit tests for the Vector and Matrix classes from `.lib`.

    NOTE(review): every test method previously shared the single name
    ``lowerCamelCase__``, so later definitions shadowed earlier ones and — since
    none started with ``test`` — unittest discovered and ran none of them.  Each
    method now has a unique ``test_*`` name so the whole suite actually executes.
    """

    def test_component(self):
        """component(i) returns the i-th entry; an empty Vector() must construct."""
        x = Vector([1, 2, 3])
        self.assertEqual(x.component(0), 1)
        self.assertEqual(x.component(2), 3)
        _ = Vector()  # constructing with no components must not raise

    def test_str(self):
        """str() renders the components in parentheses, comma-separated."""
        x = Vector([0, 0, 0, 0, 0, 1])
        self.assertEqual(str(x), "(0,0,0,0,0,1)")

    def test_size(self):
        """len() reports the number of components."""
        x = Vector([1, 2, 3, 4])
        self.assertEqual(len(x), 4)

    def test_euclidean_length(self):
        """euclidean_length() is the 2-norm; exact zero for the zero vector."""
        x = Vector([1, 2])
        y = Vector([1, 2, 3, 4, 5])
        z = Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
        w = Vector([1, -1, 1, -1, 2, -3, 4, -5])
        self.assertAlmostEqual(x.euclidean_length(), 2.236, 3)
        self.assertAlmostEqual(y.euclidean_length(), 7.416, 3)
        self.assertEqual(z.euclidean_length(), 0)
        self.assertAlmostEqual(w.euclidean_length(), 7.616, 3)

    def test_add(self):
        """Vector addition is component-wise."""
        x = Vector([1, 2, 3])
        y = Vector([1, 1, 1])
        self.assertEqual((x + y).component(0), 2)
        self.assertEqual((x + y).component(1), 3)
        self.assertEqual((x + y).component(2), 4)

    def test_sub(self):
        """Vector subtraction is component-wise."""
        x = Vector([1, 2, 3])
        y = Vector([1, 1, 1])
        self.assertEqual((x - y).component(0), 0)
        self.assertEqual((x - y).component(1), 1)
        self.assertEqual((x - y).component(2), 2)

    def test_mul(self):
        """`*` is scalar multiplication for numbers and the dot product for vectors."""
        x = Vector([1, 2, 3])
        a = Vector([2, -1, 4])  # for test of dot product
        b = Vector([1, -2, -1])
        self.assertEqual(str(x * 3.0), "(3.0,6.0,9.0)")
        self.assertEqual((a * b), 0)

    def test_zero_vector(self):
        """zero_vector(n) has n zero components."""
        self.assertEqual(str(zero_vector(10)).count("0"), 10)

    def test_unit_basis_vector(self):
        """unit_basis_vector(n, i) is all zeros except a 1 at index i."""
        self.assertEqual(str(unit_basis_vector(3, 1)), "(0,1,0)")

    def test_axpy(self):
        """axpy(a, x, y) computes a*x + y."""
        x = Vector([1, 2, 3])
        y = Vector([1, 0, 1])
        self.assertEqual(str(axpy(2, x, y)), "(3,4,7)")

    def test_copy(self):
        """copy() yields an equal, independent vector."""
        x = Vector([1, 0, 0, 0, 0, 0])
        y = x.copy()
        self.assertEqual(str(x), str(y))

    def test_change_component(self):
        """change_component(i, v) overwrites one entry in place."""
        x = Vector([1, 0, 0])
        x.change_component(0, 0)
        x.change_component(1, 1)
        self.assertEqual(str(x), "(0,1,0)")

    def test_str_matrix(self):
        """str() renders the matrix row by row between '|' bars."""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual("|1,2,3|\n|2,4,5|\n|6,7,8|\n", str(a))

    def test_minor(self):
        """minor(x, y) is the determinant of the submatrix without row x / col y."""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        minors = [[-3, -14, -10], [-5, -10, -5], [-2, -1, 0]]
        for x in range(a.height()):
            for y in range(a.width()):
                self.assertEqual(minors[x][y], a.minor(x, y))

    def test_cofactor(self):
        """cofactor(x, y) is the signed minor."""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        cofactors = [[-3, 14, -10], [5, -10, 5], [-2, 1, 0]]
        for x in range(a.height()):
            for y in range(a.width()):
                self.assertEqual(cofactors[x][y], a.cofactor(x, y))

    def test_determinant(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual(-5, a.determinant())

    def test_matrix_mul(self):
        """`*` is matrix-vector product for vectors and scaling for numbers."""
        a = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]], 3, 3)
        x = Vector([1, 2, 3])
        self.assertEqual("(14,32,50)", str(a * x))
        self.assertEqual("|2,4,6|\n|8,10,12|\n|14,16,18|\n", str(a * 2))

    def test_matrix_change_component(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        a.change_component(0, 2, 5)
        self.assertEqual("|1,2,5|\n|2,4,5|\n|6,7,8|\n", str(a))

    def test_matrix_component(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        # Was `assertEqual(7, ..., 0.01)`: the 0.01 was silently consumed as the
        # *msg* argument.  Use an approximate comparison with an explicit tolerance.
        self.assertAlmostEqual(7, a.component(2, 1), delta=0.01)

    def test_matrix_add(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        b = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]], 3, 3)
        self.assertEqual("|2,4,10|\n|4,8,10|\n|12,14,18|\n", str(a + b))

    def test_matrix_sub(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        b = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]], 3, 3)
        self.assertEqual("|0,0,-4|\n|0,0,0|\n|0,0,-2|\n", str(a - b))

    def test_square_zero_matrix(self):
        self.assertEqual(
            "|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n",
            str(square_zero_matrix(5)),
        )


if __name__ == "__main__":
    unittest.main()
| 87 | 1 |
"""simple docstring"""
class _UpperCAmelCase :
def __init__( self , snake_case_ , snake_case_ , snake_case_ ):
_snake_case : Optional[Any] = name
_snake_case : List[str] = value
_snake_case : Optional[Any] = weight
def __repr__( self ):
return F'{self.__class__.__name__}({self.name}, {self.value}, {self.weight})'
def lowerCamelCase__ ( self ):
return self.value
def lowerCamelCase__ ( self ):
return self.name
def lowerCamelCase__ ( self ):
return self.weight
def lowerCamelCase__ ( self ):
return self.value / self.weight
def a__(name, value, weight):
    """Build one menu item per (name, value, weight) triple.

    NOTE(review): the three parameters all shared the name ``a`` (a SyntaxError)
    while the body indexed ``name``/``value``/``weight``; the intended names are
    restored.  The item class is the ``_UpperCAmelCase`` defined just above
    (the original referenced an undefined ``Things``).

    :param name: list of item names
    :param value: list of item values (same length as ``name``)
    :param weight: list of item weights (same length as ``name``)
    :return: list of menu-item objects
    """
    menu = []
    for i in range(len(value)):
        menu.append(_UpperCAmelCase(name[i], value[i], weight[i]))
    return menu
def a__(items, max_cost, key_func):
    """Greedy knapsack: take items in descending ``key_func`` order while they fit.

    NOTE(review): the parameters all shared the name ``a`` (a SyntaxError) and the
    ``sorted`` call reused that placeholder for its key and reverse arguments;
    the intended call ``sorted(items, key=key_func, reverse=True)`` is restored.

    :param items: objects exposing ``get_value()`` and ``get_weight()``
    :param max_cost: total weight budget
    :param key_func: sort key (typically the value-per-weight ratio)
    :return: tuple of (chosen items, total value gained)
    """
    items_copy = sorted(items, key=key_func, reverse=True)
    result = []
    total_value, total_cost = 0.0, 0.0
    for item in items_copy:
        # Take the item only if it still fits in the remaining budget.
        if (total_cost + item.get_weight()) <= max_cost:
            result.append(item)
            total_cost += item.get_weight()
            total_value += item.get_value()
    return (result, total_value)
def a__ ( ):
    """Intentionally empty placeholder (returns None).

    NOTE(review): upstream this likely held doctest examples exercising the
    knapsack helpers above; they were replaced by a stub docstring — confirm
    and restore them.
    """
# Run any doctest examples in this module when executed as a script.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 87 |
"""simple docstring"""
from __future__ import annotations
from collections import namedtuple
def a__(voltage: float, current: float, power: float):
    """Apply the electrical power formula P = V * I.

    Exactly one of the three arguments must be 0; that quantity is computed from
    the other two and returned as a ``result(name, value)`` named tuple.

    NOTE(review): the parameters were all declared as ``a`` (a SyntaxError)
    while the body read ``voltage``/``current``/``power``; the intended names
    are restored.

    :raises ValueError: if zero or several arguments are 0, or if power < 0.
    """
    result = namedtuple("result", "name value")
    if (voltage, current, power).count(0) != 1:
        raise ValueError("Only one argument must be 0")
    elif power < 0:
        raise ValueError(
            "Power cannot be negative in any electrical/electronics system")
    elif voltage == 0:
        return result("voltage", power / current)
    elif current == 0:
        return result("current", power / voltage)
    elif power == 0:
        # Power is rounded to two decimals, mirroring typical measurement precision.
        return result("power", float(round(abs(voltage * current), 2)))
    else:
        raise ValueError("Exactly one argument must be 0")
# Run the doctest examples embedded in this module when executed directly.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 87 | 1 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
# The slow (SentencePiece-based) AlbertTokenizer is only importable when the
# `sentencepiece` package is installed; otherwise fall back to None so the fast
# tokenizer can still be used on its own.
# NOTE(review): these module-level names were previously all bound to the
# placeholder `_a`, while the tokenizer class below reads `AlbertTokenizer`,
# `logger`, `VOCAB_FILES_NAMES`, `PRETRAINED_VOCAB_FILES_MAP` and
# `PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES`; the intended names are restored so
# the references resolve.
if is_sentencepiece_available():
    from .tokenization_albert import AlbertTokenizer
else:
    AlbertTokenizer = None

logger = logging.get_logger(__name__)

# Local file names the tokenizer reads and writes.
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

# Hub locations of the pretrained vocabulary/tokenizer files per checkpoint.
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/spiece.model",
        "albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/spiece.model",
        "albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model",
        "albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model",
        "albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/spiece.model",
        "albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/spiece.model",
        "albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model",
        "albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model",
    },
    "tokenizer_file": {
        "albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json",
        "albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json",
        "albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json",
        "albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json",
        "albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json",
        "albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json",
        "albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json",
        "albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json",
    },
}

# Maximum sequence length each checkpoint supports.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "albert-base-v1": 512,
    "albert-large-v1": 512,
    "albert-xlarge-v1": 512,
    "albert-xxlarge-v1": 512,
    "albert-base-v2": 512,
    "albert-large-v2": 512,
    "albert-xlarge-v2": 512,
    "albert-xxlarge-v2": 512,
}

# SentencePiece's meta-symbol marking a word boundary.
SPIECE_UNDERLINE = "▁"
class _UpperCAmelCase(PreTrainedTokenizerFast):
    """Fast ALBERT tokenizer backed by HuggingFace *tokenizers*.

    NOTE(review): the constructor previously declared many parameters under the
    single duplicated name ``snake_case_`` (a SyntaxError), the class attributes
    all shared the name ``__lowercase`` (later ones shadowed earlier ones), and
    the three methods shared one name.  The parameter/attribute/method names the
    body actually reads (``do_lower_case``, ``self.vocab_file``,
    ``save_vocabulary`` etc.) and the ``PreTrainedTokenizerFast`` base imported
    above are restored — confirm against the original module.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = AlbertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        remove_space=True,
        keep_accents=False,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="<unk>",
        sep_token="[SEP]",
        pad_token="<pad>",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. includes the space before it
        # and is matched in the raw, non-normalized text.
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        # Saving a slow tokenizer requires the original SentencePiece model file.
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Add ALBERT special tokens: `[CLS] A [SEP]` or `[CLS] A [SEP] B [SEP]`."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        """Segment ids: 0 for `[CLS] A [SEP]`, 1 for the optional `B [SEP]` part."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        """Copy the SentencePiece model into `save_directory` and return its path."""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer.")
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        # Avoid copying the file onto itself when saving in place.
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
| 87 |
"""simple docstring"""
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DPMSolverMultistepScheduler,
TextToVideoSDPipeline,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()


@skip_mps
class _UpperCAmelCase(PipelineTesterMixin, unittest.TestCase):
    """Fast (tiny-model, CPU) tests for TextToVideoSDPipeline.

    NOTE(review): the class attributes and test methods previously all shared
    one name each (``__lowercase`` / ``lowerCamelCase__``), so attributes
    shadowed each other and unittest ran none of the tests.  The mixin-expected
    attribute names and unique ``test_*`` method names are restored; the kwarg
    values hidden behind ``snake_case_`` placeholders were reconstructed from
    the upstream test file — confirm against it.
    """

    pipeline_class = TextToVideoSDPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    # No `output_type`.
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "generator",
            "latents",
            "return_dict",
            "callback",
            "callback_steps",
        ]
    )

    def get_dummy_components(self):
        """Build tiny model components so the pipeline runs quickly on CPU."""
        torch.manual_seed(0)
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64, 64, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D"),
            up_block_types=("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D"),
            cross_attention_dim=32,
            attention_head_dim=4,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        """Deterministic call kwargs for the pipeline on the given device."""
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "pt",
        }
        return inputs

    def test_text_to_video_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = TextToVideoSDPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        inputs["output_type"] = "np"
        frames = sd_pipe(**inputs).frames
        image_slice = frames[0][-3:, -3:, -1]
        assert frames[0].shape == (64, 64, 3)
        expected_slice = np.array([158.0, 160.0, 153.0, 125.0, 100.0, 121.0, 111.0, 93.0, 113.0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(test_mean_pixel_difference=False, expected_max_diff=3e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False, expected_max_diff=1e-2)

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_consistent(self):
        pass

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_single_identical(self):
        pass

    @unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline.")
    def test_num_images_per_prompt(self):
        pass

    def test_progress_bar(self):
        return super().test_progress_bar()
@slow
@skip_mps
class _UpperCAmelCase(unittest.TestCase):
    """Slow integration tests running the real damo-vilab text-to-video model on CUDA.

    NOTE(review): both methods previously shared the name ``lowerCamelCase__``
    (the second shadowed the first) and neither started with ``test``, so
    unittest ran nothing; unique ``test_*`` names are restored.
    """

    def test_full_model(self):
        """25-step DPM-Solver run must stay close to the reference video."""
        expected_video = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy")
        pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe = pipe.to("cuda")
        prompt = "Spiderman is surfing"
        generator = torch.Generator(device="cpu").manual_seed(0)
        video_frames = pipe(prompt, generator=generator, num_inference_steps=25, output_type="pt").frames
        video = video_frames.cpu().numpy()
        assert np.abs(expected_video - video).mean() < 5e-2

    def test_two_step_model(self):
        """2-step run with the default scheduler against the 2-step reference."""
        expected_video = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy")
        pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
        pipe = pipe.to("cuda")
        prompt = "Spiderman is surfing"
        generator = torch.Generator(device="cpu").manual_seed(0)
        video_frames = pipe(prompt, generator=generator, num_inference_steps=2, output_type="pt").frames
        video = video_frames.cpu().numpy()
        assert np.abs(expected_video - video).mean() < 5e-2
| 87 | 1 |
"""simple docstring"""
import sys
import webbrowser
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
    print("Googling.....")
    # Build the search URL from the command-line words.
    # NOTE(review): intermediate results were previously all bound to `_a` while
    # later lines read `res`, `soup` and `links` (NameErrors at runtime); the
    # referenced names are restored.
    url = "https://www.google.com/search?q=" + " ".join(sys.argv[1:])
    # Send a randomized browser User-Agent so Google serves the normal HTML page.
    # (Was the nonstandard header name "UserAgent", which servers ignore; the
    # standard HTTP request header is "User-Agent".)
    res = requests.get(url, headers={"User-Agent": UserAgent().random})
    # res.raise_for_status()
    with open("project1a.html", "wb") as out_file:  # only for knowing the class
        for data in res.iter_content(10_000):
            out_file.write(data)
    soup = BeautifulSoup(res.text, "html.parser")
    # Keep only the first five result links.
    links = list(soup.select(".eZt8xd"))[:5]
    print(len(links))
    for link in links:
        if link.text == "Maps":
            webbrowser.open(link.get("href"))
        else:
            webbrowser.open(f'https://google.com{link.get("href")}')
| 87 |
"""simple docstring"""
from typing import List, Optional
import numpy as np
from ...processing_utils import ProcessorMixin
from ...utils import to_numpy
class _UpperCAmelCase(ProcessorMixin):
    """Processor wrapping an EncodecFeatureExtractor and a T5 tokenizer.

    NOTE(review): the class attributes both shared the name ``__lowercase`` and
    several methods shared the name ``lowerCamelCase__`` (later definitions
    shadowed earlier ones), and the constructor bound its state to locals while
    ``__call__`` reads ``self._in_target_context_manager`` /
    ``self.current_processor``.  The ``ProcessorMixin``-expected attribute
    names, distinct method names and instance attributes are restored.
    """

    feature_extractor_class = "EncodecFeatureExtractor"
    tokenizer_class = ("T5Tokenizer", "T5TokenizerFast")

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def get_decoder_prompt_ids(self, task=None, language=None, no_timestamps=True):
        """Forward decoder prompt-id construction to the tokenizer."""
        return self.tokenizer.get_decoder_prompt_ids(task=task, language=language, no_timestamps=no_timestamps)

    def __call__(self, *args, **kwargs):
        """Process `audio` and/or `text` inputs; merges both when given together."""
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)
        audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            # First positional argument is treated as the audio input.
            audio = args[0]
            args = args[1:]
        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")
        if text is not None:
            inputs = self.tokenizer(text, **kwargs)
        if audio is not None:
            audio_inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if audio is None:
            return inputs
        elif text is None:
            return audio_inputs
        else:
            # Merge the audio features into the tokenized text inputs.
            inputs["input_values"] = audio_inputs["input_values"]
            if "padding_mask" in audio_inputs:
                inputs["padding_mask"] = audio_inputs["padding_mask"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        """Decode audio batches via `_decode_audio`, otherwise defer to the tokenizer."""
        audio_values = kwargs.pop("audio", None)
        padding_mask = kwargs.pop("padding_mask", None)
        if len(args) > 0:
            audio_values = args[0]
            args = args[1:]
        if audio_values is not None:
            return self._decode_audio(audio_values, padding_mask=padding_mask)
        else:
            return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward token decoding to the tokenizer."""
        return self.tokenizer.decode(*args, **kwargs)

    def _decode_audio(self, audio_values, padding_mask=None):
        """Strip padding from generated audio using the (possibly shorter) padding mask."""
        audio_values = to_numpy(audio_values)
        bsz, channels, seq_len = audio_values.shape
        if padding_mask is None:
            return list(audio_values)
        padding_mask = to_numpy(padding_mask)
        # match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding**
        # token (so that the generated audio values are **not** treated as padded tokens)
        difference = seq_len - padding_mask.shape[-1]
        padding_value = 1 - self.feature_extractor.padding_value
        padding_mask = np.pad(padding_mask, ((0, 0), (0, difference)), "constant", constant_values=padding_value)
        audio_values = audio_values.tolist()
        for i in range(bsz):
            sliced_audio = np.asarray(audio_values[i])[
                padding_mask[i][None, :] != self.feature_extractor.padding_value
            ]
            audio_values[i] = sliced_audio.reshape(channels, -1)
        return audio_values
| 87 | 1 |
"""simple docstring"""
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
# NOTE(review): the intermediate results were previously all assigned to `_a`
# while later lines read `fork_point_sha`, `modified_files`, `joined_dirs` and
# `regex` (NameErrors at runtime); the referenced names are restored.
fork_point_sha = subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8")
modified_files = subprocess.check_output(f"git diff --name-only {fork_point_sha}".split()).decode("utf-8").split()
# Build one regex matching .py files under any of the requested top-level dirs.
joined_dirs = "|".join(sys.argv[1:])
regex = re.compile(rf"^({joined_dirs}).*?\.py$")
relevant_modified_files = [x for x in modified_files if regex.match(x)]
# No trailing newline: the output is consumed directly by Makefile commands.
print(" ".join(relevant_modified_files), end="")
| 87 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
# Lazy-import structure for the YOLOS sub-package: modules are only imported
# when their optional backend (vision / torch) is available.
# NOTE(review): the structure dict and the final module replacement were
# previously bound to the placeholder `_a`, so the `_import_structure` argument
# passed to `_LazyModule` below was undefined and the optional entries were
# dropped; the intended bindings are restored.
_import_structure = {"configuration_yolos": ["YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP", "YolosConfig", "YolosOnnxConfig"]}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_yolos"] = ["YolosFeatureExtractor"]
    _import_structure["image_processing_yolos"] = ["YolosImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_yolos"] = [
        "YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST",
        "YolosForObjectDetection",
        "YolosModel",
        "YolosPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_yolos import YolosFeatureExtractor
        from .image_processing_yolos import YolosImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_yolos import (
            YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
            YolosForObjectDetection,
            YolosModel,
            YolosPreTrainedModel,
        )
else:
    import sys

    # At runtime, replace this module with a lazy proxy that imports on access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 87 | 1 |
"""simple docstring"""
import html
from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from ...utils import is_bsa_available, logging, requires_backends
# BeautifulSoup is an optional backend; only import it when the bs4 package
# ("bsa" in this module's naming) is installed.
if is_bsa_available():
    import bsa
    from bsa import BeautifulSoup

# NOTE(review): `_a` is presumably the module logger (upstream name `logger`);
# nothing in the visible code reads it — confirm against the full file.
_a : int = logging.get_logger(__name__)
class _UpperCAmelCase(FeatureExtractionMixin):
    """Feature extractor that turns HTML into text nodes plus their XPath expressions.

    NOTE(review): the three helper methods previously all shared the name
    ``lowerCamelCase__`` (later definitions shadowed earlier ones, breaking the
    calls inside ``__call__``) and the base class was the undefined placeholder
    ``_snake_case``; the distinct method names ``__call__`` invokes and the
    ``FeatureExtractionMixin`` base imported above are restored.
    """

    def __init__(self, **kwargs):
        requires_backends(self, ["bs4"])
        super().__init__(**kwargs)

    def xpath_soup(self, element):
        """Return (tag names, sibling subscripts) from the root down to `element`."""
        xpath_tags = []
        xpath_subscripts = []
        child = element if element.name else element.parent
        for parent in child.parents:  # type: bs4.element.Tag
            siblings = parent.find_all(child.name, recursive=False)
            xpath_tags.append(child.name)
            # Subscript 0 means the tag is the only sibling of its kind; otherwise
            # record the 1-based position among same-named siblings.
            xpath_subscripts.append(
                0 if 1 == len(siblings) else next(i for i, s in enumerate(siblings, 1) if s is child))
            child = parent
        xpath_tags.reverse()
        xpath_subscripts.reverse()
        return xpath_tags, xpath_subscripts

    def get_three_from_single(self, html_string):
        """Parse one HTML string into (doc strings, per-string tags, per-string subscripts)."""
        html_code = BeautifulSoup(html_string, "html.parser")
        all_doc_strings = []
        string2xtag_seq = []
        string2xsubs_seq = []
        for element in html_code.descendants:
            if type(element) == bsa.element.NavigableString:
                if type(element.parent) != bsa.element.Tag:
                    continue
                text_in_this_tag = html.unescape(element).strip()
                if not text_in_this_tag:
                    continue
                all_doc_strings.append(text_in_this_tag)
                xpath_tags, xpath_subscripts = self.xpath_soup(element)
                string2xtag_seq.append(xpath_tags)
                string2xsubs_seq.append(xpath_subscripts)
        if len(all_doc_strings) != len(string2xtag_seq):
            raise ValueError("Number of doc strings and xtags does not correspond")
        if len(all_doc_strings) != len(string2xsubs_seq):
            raise ValueError("Number of doc strings and xsubs does not correspond")
        return all_doc_strings, string2xtag_seq, string2xsubs_seq

    def construct_xpath(self, xpath_tags, xpath_subscripts):
        """Render tag/subscript lists as an XPath string like `/html/body/div[2]`."""
        xpath = ""
        for tagname, subs in zip(xpath_tags, xpath_subscripts):
            xpath += f"/{tagname}"
            if subs != 0:
                xpath += f"[{subs}]"
        return xpath

    def __call__(self, html_strings) -> BatchFeature:
        """Extract nodes and xpaths from one HTML string or a batch of them."""
        valid_strings = False
        # Check that strings has a valid type
        if isinstance(html_strings, str):
            valid_strings = True
        elif isinstance(html_strings, (list, tuple)):
            if len(html_strings) == 0 or isinstance(html_strings[0], str):
                valid_strings = True
        if not valid_strings:
            raise ValueError(
                "HTML strings must of type `str`, `List[str]` (batch of examples), "
                f"but is of type {type(html_strings)}.")
        is_batched = bool(isinstance(html_strings, (list, tuple)) and (isinstance(html_strings[0], str)))
        if not is_batched:
            html_strings = [html_strings]
        # Get nodes + xpaths
        nodes = []
        xpaths = []
        for html_string in html_strings:
            all_doc_strings, string2xtag_seq, string2xsubs_seq = self.get_three_from_single(html_string)
            nodes.append(all_doc_strings)
            xpath_strings = []
            for node, tag_list, sub_list in zip(all_doc_strings, string2xtag_seq, string2xsubs_seq):
                xpath_string = self.construct_xpath(tag_list, sub_list)
                xpath_strings.append(xpath_string)
            xpaths.append(xpath_strings)
        # return as Dict
        data = {"nodes": nodes, "xpaths": xpaths}
        encoded_inputs = BatchFeature(data=data, tensor_type=None)
        return encoded_inputs
| 87 |
"""simple docstring"""
import numpy as np
import torch
from torch.utils.data import Dataset, IterableDataset
from ..utils.generic import ModelOutput
class _UpperCAmelCase ( _snake_case):
def __init__( self , snake_case_ , snake_case_ , snake_case_ ):
_snake_case : Optional[int] = dataset
_snake_case : str = process
_snake_case : int = params
def __len__( self ):
return len(self.dataset )
def __getitem__( self , snake_case_ ):
_snake_case : Union[str, Any] = self.dataset[i]
_snake_case : Optional[Any] = self.process(snake_case_ , **self.params )
return processed
class _UpperCAmelCase ( _snake_case):
def __init__( self , snake_case_ , snake_case_ , snake_case_ , snake_case_=None ):
_snake_case : Union[str, Any] = loader
_snake_case : Tuple = infer
_snake_case : List[Any] = params
if loader_batch_size == 1:
# Let's spare some time by deactivating altogether
_snake_case : int = None
_snake_case : int = loader_batch_size
# Internal bookkeeping
_snake_case : Any = None
_snake_case : Dict = None
def __len__( self ):
return len(self.loader )
def __iter__( self ):
_snake_case : int = iter(self.loader )
return self
def lowerCamelCase__ ( self ):
if isinstance(self._loader_batch_data , torch.Tensor ):
# Batch data is simple tensor, just fetch the slice
_snake_case : List[Any] = self._loader_batch_data[self._loader_batch_index]
else:
# Batch data is assumed to be BaseModelOutput (or dict)
_snake_case : int = {}
for k, element in self._loader_batch_data.items():
if isinstance(snake_case_ , snake_case_ ):
# Convert ModelOutput to tuple first
_snake_case : Tuple = element.to_tuple()
if isinstance(element[0] , torch.Tensor ):
_snake_case : Union[str, Any] = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element )
elif isinstance(element[0] , np.ndarray ):
_snake_case : int = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element )
continue
if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(snake_case_ , snake_case_ ):
# Those are stored as lists of tensors so need specific unbatching.
if isinstance(element[0] , torch.Tensor ):
_snake_case : Tuple = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element )
elif isinstance(element[0] , np.ndarray ):
_snake_case : Tuple = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element )
continue
if element is None:
# This can happen for optional data that get passed around
_snake_case : Tuple = None
elif isinstance(element[self._loader_batch_index] , torch.Tensor ):
# Take correct batch data, but make it looked like batch_size=1
# For compatibility with other methods within transformers
_snake_case : List[Any] = element[self._loader_batch_index].unsqueeze(0 )
elif isinstance(element[self._loader_batch_index] , np.ndarray ):
# Take correct batch data, but make it looked like batch_size=1
# For compatibility with other methods within transformers
_snake_case : Union[str, Any] = np.expand_dims(element[self._loader_batch_index] , 0 )
else:
# This is typically a list, so no need to `unsqueeze`.
_snake_case : List[Any] = element[self._loader_batch_index]
# Recreate the element by reusing the original class to make it look
# batch_size=1
_snake_case : int = self._loader_batch_data.__class__(snake_case_ )
self._loader_batch_index += 1
return result
def lowerCamelCase__ ( self ):
if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
# We are currently unrolling a batch so we just need to return
# the current item within a batch
return self.loader_batch_item()
# We're out of items within a batch
_snake_case : Tuple = next(self.iterator )
_snake_case : Any = self.infer(snake_case_ , **self.params )
# We now have a batch of "inferred things".
if self.loader_batch_size is not None:
# Try to infer the size of the batch
if isinstance(snake_case_ , torch.Tensor ):
_snake_case : Union[str, Any] = processed
else:
_snake_case : Optional[int] = list(processed.keys() )[0]
_snake_case : List[str] = processed[key]
if isinstance(snake_case_ , snake_case_ ):
_snake_case : Dict = len(snake_case_ )
else:
_snake_case : Optional[int] = first_tensor.shape[0]
if 0 < observed_batch_size < self.loader_batch_size:
# could be last batch so we can't unroll as many
# elements.
_snake_case : Union[str, Any] = observed_batch_size
# Setting internal index to unwrap the batch
_snake_case : str = processed
_snake_case : List[Any] = 0
return self.loader_batch_item()
else:
# We're not unrolling batches
return processed
class _UpperCAmelCase(_UpperCAmelCase):  # noqa: F811 -- extends the iterator class defined just above
    """Iterator flattening the per-item sub-iterators produced by `infer` into one stream.

    NOTE(review): the base was the undefined placeholder ``_snake_case``;
    upstream this class extends the pipeline iterator defined immediately
    above, which is what the base expression resolves to here — confirm against
    the original module.  ``__iter__`` also failed to bind ``self.subiterator``
    (it assigned a local), which ``__next__`` reads; the attribute is restored.
    """

    def __init__(self, loader, infer, params, loader_batch_size=None):
        super().__init__(loader, infer, params)

    def __iter__(self):
        self.iterator = iter(self.loader)
        self.subiterator = None
        return self

    def __next__(self):
        if self.subiterator is None:
            # First call: build the sub-iterator for the first input item.
            self.subiterator = self.infer(next(self.iterator), **self.params)
        try:
            # Try to return next item
            processed = next(self.subiterator)
        except StopIteration:
            # When a preprocess iterator ends, we can start lookig at the next item
            # ChunkIterator will keep feeding until ALL elements of iterator
            # all have created their subiterator and have been iterating against.
            #
            # Another way to look at it, is we're basically flattening lists of lists
            # into a single list, but with generators
            self.subiterator = self.infer(next(self.iterator), **self.params)
            processed = next(self.subiterator)
        return processed
class _UpperCAmelCase ( _snake_case):
    # Accumulates items until an `is_last` marker, regrouping flattened chunks
    # back into their original `process` boundaries.
    # NOTE(review): throughout this class, values are bound to annotated
    # locals named `_snake_case` while later lines read names such as
    # `item`, `is_last`, `accumulator`, `processed`, `key`, `first_tensor`,
    # `observed_batch_size` and attributes like `self.iterator` — the
    # assignment targets appear to have been mangled; confirm against the
    # upstream pipeline iterator before relying on this code.
    def __iter__( self ):
        # presumably should set self.iterator — TODO confirm
        _snake_case : Optional[Any] = iter(self.loader )
        return self
    def lowerCamelCase__ ( self ):
        # Extremely similar to PipelineIterator in its unpacking mechanism
        # BUT, we have an extra required item which is the presence of `is_last`
        # That is because everything is flattened by `PipelineChunkIterator` we
        # need to keep track of how to regroup here in the original `process`
        # boundaries so that `process` and `postprocess` see the same data.
        # This iterator accumulates items (possibly while unbatching) until it
        # its a `is_last` and then just passes it on to the caller.
        _snake_case : Optional[Any] = False
        _snake_case : Tuple = []
        # Drain any partially-consumed loader batch first.
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            while self._loader_batch_index < self.loader_batch_size:
                _snake_case : Union[str, Any] = self.loader_batch_item()
                _snake_case : str = item.pop("is_last" )
                accumulator.append(snake_case_ )
                if is_last:
                    return accumulator
        # Keep running inference until an item flagged `is_last` shows up.
        while not is_last:
            _snake_case : List[str] = self.infer(next(self.iterator ) , **self.params )
            if self.loader_batch_size is not None:
                # Infer the observed batch size from the first tensor/value.
                if isinstance(snake_case_ , torch.Tensor ):
                    _snake_case : Union[str, Any] = processed
                else:
                    _snake_case : Tuple = list(processed.keys() )[0]
                    _snake_case : Tuple = processed[key]
                if isinstance(snake_case_ , snake_case_ ):
                    _snake_case : Any = len(snake_case_ )
                else:
                    _snake_case : List[Any] = first_tensor.shape[0]
                if 0 < observed_batch_size < self.loader_batch_size:
                    # could be last batch so we can't unroll as many
                    # elements.
                    _snake_case : Dict = observed_batch_size
                _snake_case : List[Any] = processed
                _snake_case : List[str] = 0
                while self._loader_batch_index < self.loader_batch_size:
                    _snake_case : Union[str, Any] = self.loader_batch_item()
                    _snake_case : int = item.pop("is_last" )
                    accumulator.append(snake_case_ )
                    if is_last:
                        return accumulator
            else:
                # Not unrolling batches: pass the processed item straight through.
                _snake_case : Dict = processed
                _snake_case : Dict = item.pop("is_last" )
                accumulator.append(snake_case_ )
        return accumulator
class _UpperCAmelCase ( _snake_case):
    """Dataset view exposing a single column (`key`) of each underlying item."""

    def __init__( self , dataset , key ):
        # The original declared both parameters as `snake_case_` (duplicate
        # argument names are a SyntaxError) and bound the values to throwaway
        # annotated locals; store them on `self`, since `__len__` and
        # `__getitem__` read `self.dataset` / `self.key`.
        self.dataset = dataset
        self.key = key

    def __len__( self ):
        return len(self.dataset )

    def __getitem__( self , i ):
        # The index parameter was named `snake_case_` while the body used a
        # never-defined `i`; name the parameter to match its use.
        return self.dataset[i][self.key]
class _UpperCAmelCase ( _snake_case):
    """Dataset view pairing two columns as {"text": ..., "text_pair": ...}."""

    def __init__( self , dataset , key_text , key_pair ):
        # Originally all three parameters were named `snake_case_` (a
        # SyntaxError) and both keys were stored under the same attribute
        # name `keya`, so the second clobbered the first.  Keep the two keys
        # distinct: one per output field of `__getitem__`.
        self.dataset = dataset
        self.key_text = key_text
        self.key_pair = key_pair

    def __len__( self ):
        return len(self.dataset )

    def __getitem__( self , i ):
        # The index parameter was named `snake_case_` while the body used a
        # never-defined `i`; name the parameter to match its use.
        return {"text": self.dataset[i][self.key_text], "text_pair": self.dataset[i][self.key_pair]}
| 87 | 1 |
"""simple docstring"""
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import is_accelerate_available, is_torch_available, is_transformers_available, is_xformers_available
from . import BaseDiffusersCLICommand
def a__ ( a : int ):
    """Factory for the diffusers `env` CLI command.

    The argument (the parsed argparse namespace) is accepted but unused.

    NOTE(review): `EnvironmentCommand` is not defined anywhere in this file
    (the command class below was renamed), so this call raises NameError at
    runtime — presumably it should construct that class; verify against the
    original module before changing it.
    """
    return EnvironmentCommand()
class _UpperCAmelCase ( _snake_case):
    # CLI command that collects and prints environment/version information.
    # NOTE(review): all three methods below share the name `lowerCamelCase__`,
    # so in Python the later definitions shadow the earlier ones — only the
    # final staticmethod survives as the class attribute.  Several names used
    # in the bodies (`parser`, `download_parser`, `pt_version`, `info`, `d`,
    # ...) are never bound because assignments were mangled into `_snake_case`
    # locals; confirm against the upstream module before relying on this.
    @staticmethod
    def lowerCamelCase__ ( snake_case_ ):
        # Registers the `env` sub-command on an argparse sub-parser collection.
        _snake_case : List[str] = parser.add_parser("env" )
        download_parser.set_defaults(func=snake_case_ )
    def lowerCamelCase__ ( self ):
        # Gathers library versions (torch/transformers/accelerate/xformers are
        # optional) and prints them in a GitHub-issue-friendly format.
        _snake_case : Union[str, Any] = huggingface_hub.__version__
        _snake_case : int = "not installed"
        _snake_case : Tuple = "NA"
        if is_torch_available():
            import torch
            _snake_case : int = torch.__version__
            _snake_case : int = torch.cuda.is_available()
        _snake_case : List[str] = "not installed"
        if is_transformers_available():
            import transformers
            _snake_case : Optional[Any] = transformers.__version__
        _snake_case : Tuple = "not installed"
        if is_accelerate_available():
            import accelerate
            _snake_case : Optional[int] = accelerate.__version__
        _snake_case : List[str] = "not installed"
        if is_xformers_available():
            import xformers
            _snake_case : Any = xformers.__version__
        _snake_case : int = {
            "`diffusers` version": version,
            "Platform": platform.platform(),
            "Python version": platform.python_version(),
            "PyTorch version (GPU?)": F'{pt_version} ({pt_cuda_available})',
            "Huggingface_hub version": hub_version,
            "Transformers version": transformers_version,
            "Accelerate version": accelerate_version,
            "xFormers version": xformers_version,
            "Using GPU in script?": "<fill in>",
            "Using distributed or parallel set-up in script?": "<fill in>",
        }
        print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n" )
        print(self.format_dict(snake_case_ ) )
        return info
    @staticmethod
    def lowerCamelCase__ ( snake_case_ ):
        # Formats a dict as "- key: value" lines (upstream: format_dict).
        return "\n".join([F'- {prop}: {val}' for prop, val in d.items()] ) + "\n"
| 87 |
"""simple docstring"""
def a__ ( number : int ):
    """Return the 1-based position of the most significant set bit of *number*.

    Equivalent to ``int.bit_length`` for non-negative integers:
    ``0 -> 0``, ``1 -> 1``, ``8 -> 4``.

    Raises:
        TypeError: if *number* is not an ``int``.
        ValueError: if *number* is negative (the shift loop below would
            otherwise never terminate for negative values).
    """
    if not isinstance(number , int ):
        # The original checked `isinstance(a, a)`, which always raises
        # TypeError ("arg 2 must be a type"); compare against `int` as
        # intended.  The original also used `number`/`position` in the body
        # while naming the parameter `a`, so the parameter is renamed.
        raise TypeError("Input value must be an 'int' type" )
    if number < 0:
        raise ValueError("Input value must be a non-negative integer" )
    position = 0
    while number:
        position += 1
        number >>= 1
    return position
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest
    doctest.testmod()
| 87 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger for this configuration module.
# NOTE(review): both module constants are bound to the same scrambled name
# `_a`, so the URL map below shadows the logger — confirm the intended
# upstream names (logger / CVT_PRETRAINED_CONFIG_ARCHIVE_MAP).
_a : Optional[Any] = logging.get_logger(__name__)
# Map of pretrained model identifiers to their hosted config-file URLs.
_a : Dict = {
    """microsoft/cvt-13""": """https://huggingface.co/microsoft/cvt-13/resolve/main/config.json""",
    # See all Cvt models at https://huggingface.co/models?filter=cvt
}
class _UpperCAmelCase ( _snake_case):
    """Configuration holder for a CvT-style model.

    The original ``__init__`` declared every parameter as ``snake_case_``
    (duplicate argument names are a SyntaxError) and bound each value to a
    throwaway annotated local, so the constructor had no effect.  The
    parameters are renamed to match the attributes they populate and are
    stored on ``self``.

    NOTE(review): the list defaults are shared mutable objects (mirroring
    the upstream Hugging Face config); callers must not mutate them in
    place.
    """

    __lowercase : str = """cvt"""

    def __init__(
        self ,
        num_channels=3 ,
        patch_sizes=[7, 3, 3] ,
        patch_stride=[4, 2, 2] ,
        patch_padding=[2, 1, 1] ,
        embed_dim=[64, 192, 384] ,
        num_heads=[1, 3, 6] ,
        depth=[1, 2, 10] ,
        mlp_ratio=[4.0, 4.0, 4.0] ,
        attention_drop_rate=[0.0, 0.0, 0.0] ,
        drop_rate=[0.0, 0.0, 0.0] ,
        drop_path_rate=[0.0, 0.0, 0.1] ,
        qkv_bias=[True, True, True] ,
        cls_token=[False, False, True] ,
        qkv_projection_method=["dw_bn", "dw_bn", "dw_bn"] ,
        kernel_qkv=[3, 3, 3] ,
        padding_kv=[1, 1, 1] ,
        stride_kv=[2, 2, 2] ,
        padding_q=[1, 1, 1] ,
        stride_q=[1, 1, 1] ,
        initializer_range=0.02 ,
        layer_norm_eps=1E-12 ,
        **kwargs ,
    ):
        # Forward unrecognized kwargs to the base config, as the original did.
        super().__init__(**kwargs )
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.depth = depth
        self.mlp_ratio = mlp_ratio
        self.attention_drop_rate = attention_drop_rate
        self.drop_rate = drop_rate
        self.drop_path_rate = drop_path_rate
        self.qkv_bias = qkv_bias
        self.cls_token = cls_token
        self.qkv_projection_method = qkv_projection_method
        self.kernel_qkv = kernel_qkv
        self.padding_kv = padding_kv
        self.stride_kv = stride_kv
        self.padding_q = padding_q
        self.stride_q = stride_q
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
| 87 |
"""simple docstring"""
from __future__ import annotations
import requests
# Fields a reddit post's JSON payload may contain; used to validate the
# caller-supplied `wanted_data` filter in the fetch function below.
# NOTE(review): the set is bound to the scrambled name `_a`, while the
# function body references `valid_terms` — confirm the intended name.
_a : List[str] = set(
    """approved_at_utc approved_by author_flair_background_color
author_flair_css_class author_flair_richtext author_flair_template_id author_fullname
author_premium can_mod_post category clicked content_categories created_utc downs
edited gilded gildings hidden hide_score is_created_from_ads_ui is_meta
is_original_content is_reddit_media_domain is_video link_flair_css_class
link_flair_richtext link_flair_text link_flair_text_color media_embed mod_reason_title
name permalink pwls quarantine saved score secure_media secure_media_embed selftext
subreddit subreddit_name_prefixed subreddit_type thumbnail title top_awarded_type
total_awards_received ups upvote_ratio url user_reports""".split()
)
def a__ ( subreddit : str , limit : int = 1 , age : str = "new" , wanted_data : list | None = None ):
    """Fetch posts from ``reddit.com/r/<subreddit>/<age>.json``.

    The original signature declared all four parameters as ``a`` (duplicate
    argument names are a SyntaxError); they are renamed to match their uses
    in the body.

    Args:
        subreddit: subreddit name to query.
        limit: number of posts to fetch and return.
        age: listing sort, e.g. "new", "hot", "top".
        wanted_data: optional list of post fields to extract; must be a
            subset of ``valid_terms``.  When empty, the raw child payloads
            are returned instead.

    Returns:
        Dict mapping post index to either the raw child payload (when no
        ``wanted_data`` is given) or a dict of just the requested fields.

    Raises:
        ValueError: if ``wanted_data`` contains an unknown field.
        requests.HTTPError: when reddit rate-limits the request (HTTP 429).
    """
    wanted_data = wanted_data or []
    if invalid_search_terms := ", ".join(sorted(set(wanted_data ) - valid_terms ) ):
        raise ValueError(f'Invalid search term: {invalid_search_terms}' )
    response = requests.get(
        f'https://reddit.com/r/{subreddit}/{age}.json?limit={limit}' , headers={"User-agent": "A random string"} , )
    if response.status_code == 429:
        raise requests.HTTPError
    data = response.json()
    if not wanted_data:
        return {id_: data["data"]["children"][id_] for id_ in range(limit )}
    data_dict = {}
    for id_ in range(limit ):
        data_dict[id_] = {
            item: data["data"]["children"][id_]["data"][item] for item in wanted_data
        }
    return data_dict
if __name__ == "__main__":
    # If you get Error 429, that means you are rate limited.Try after some time
    # NOTE(review): `get_subreddit_data` is not defined in this file (the
    # fetch function above was renamed to `a__`) — this demo call would raise
    # NameError; confirm the intended name before running.
    print(get_subreddit_data("""learnpython""", wanted_data=["""title""", """url""", """selftext"""]))
| 87 | 1 |
"""simple docstring"""
import argparse
import os
import gluonnlp as nlp
import mxnet as mx
import numpy as np
import torch
from gluonnlp.base import get_home_dir
from gluonnlp.model.bert import BERTEncoder
from gluonnlp.model.utils import _load_vocab
from gluonnlp.vocab import Vocab
from packaging import version
from torch import nn
from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
# gluonnlp / mxnet APIs changed across releases; the conversion below is only
# valid against these exact pinned versions, so fail fast otherwise.
if version.parse(nlp.__version__) != version.parse("""0.8.3"""):
    raise Exception("""requires gluonnlp == 0.8.3""")
if version.parse(mx.__version__) != version.parse("""1.5.0"""):
    raise Exception("""requires mxnet == 1.5.0""")
logging.set_verbosity_info()
_a : Optional[int] = logging.get_logger(__name__)
# Sample sentence used to compare the original and the converted model.
_a : int = """The Nymphenburg Palace is a beautiful palace in Munich!"""
# NOTE(review): both parameters are named `a` — duplicate argument names are a
# SyntaxError in Python — and `a` is then used ambiguously throughout the body
# (checkpoint path, vocab name, flags, config dict, ...).  The inner helper
# `check_and_map_params` has the same duplicate-parameter problem, and many
# values are bound to throwaway `_snake_case` locals while later lines read
# other names.  Upstream this is convert_bort_checkpoint_to_pytorch(
# bort_checkpoint_path, pytorch_dump_folder_path); confirm against that
# module before renaming anything here.
def a__ ( a : str , a : str ):
    """Convert an original (Gluon/MXNet) Bort checkpoint to a PyTorch model.

    Builds the original Bort encoder, loads the checkpoint, maps every Gluon
    parameter onto the corresponding ``BertForMaskedLM`` weight, then checks
    that both models produce approximately the same hidden states on a
    sample sentence.
    """
    _snake_case : List[str] = {
        "attention_cell": "multi_head",
        "num_layers": 4,
        "units": 1_024,
        "hidden_size": 768,
        "max_length": 512,
        "num_heads": 8,
        "scaled": True,
        "dropout": 0.1,
        "use_residual": True,
        "embed_size": 1_024,
        "embed_dropout": 0.1,
        "word_embed": None,
        "layer_norm_eps": 1e-5,
        "token_type_vocab_size": 2,
    }
    _snake_case : Optional[int] = bort_4_8_768_1024_hparams
    # Let's construct the original Bort model here
    # Taken from official BERT implementation, see:
    # https://github.com/alexa/bort/blob/master/bort/bort.py
    _snake_case : int = BERTEncoder(
        attention_cell=predefined_args["attention_cell"] , num_layers=predefined_args["num_layers"] , units=predefined_args["units"] , hidden_size=predefined_args["hidden_size"] , max_length=predefined_args["max_length"] , num_heads=predefined_args["num_heads"] , scaled=predefined_args["scaled"] , dropout=predefined_args["dropout"] , output_attention=a , output_all_encodings=a , use_residual=predefined_args["use_residual"] , activation=predefined_args.get("activation" , "gelu" ) , layer_norm_eps=predefined_args.get("layer_norm_eps" , a ) , )
    # Vocab information needs to be fetched first
    # It's the same as RoBERTa, so RobertaTokenizer can be used later
    _snake_case : str = "openwebtext_ccnews_stories_books_cased"
    # Specify download folder to Gluonnlp's vocab
    _snake_case : Tuple = os.path.join(get_home_dir() , "models" )
    _snake_case : str = _load_vocab(a , a , a , cls=a )
    _snake_case : Tuple = nlp.model.BERTModel(
        a , len(a ) , units=predefined_args["units"] , embed_size=predefined_args["embed_size"] , embed_dropout=predefined_args["embed_dropout"] , word_embed=predefined_args["word_embed"] , use_pooler=a , use_token_type_embed=a , token_type_vocab_size=predefined_args["token_type_vocab_size"] , use_classifier=a , use_decoder=a , )
    original_bort.load_parameters(a , cast_dtype=a , ignore_extra=a )
    _snake_case : Optional[Any] = original_bort._collect_params_with_prefix()
    # Build our config 🤗
    _snake_case : str = {
        "architectures": ["BertForMaskedLM"],
        "attention_probs_dropout_prob": predefined_args["dropout"],
        "hidden_act": "gelu",
        "hidden_dropout_prob": predefined_args["dropout"],
        "hidden_size": predefined_args["embed_size"],
        "initializer_range": 0.02,
        "intermediate_size": predefined_args["hidden_size"],
        "layer_norm_eps": predefined_args["layer_norm_eps"],
        "max_position_embeddings": predefined_args["max_length"],
        "model_type": "bort",
        "num_attention_heads": predefined_args["num_heads"],
        "num_hidden_layers": predefined_args["num_layers"],
        "pad_token_id": 1, # 2 = BERT, 1 = RoBERTa
        "type_vocab_size": 1, # 2 = BERT, 1 = RoBERTa
        "vocab_size": len(a ),
    }
    _snake_case : Any = BertConfig.from_dict(a )
    _snake_case : Dict = BertForMaskedLM(a )
    hf_bort_model.eval()
    # Parameter mapping table (Gluonnlp to Transformers)
    # * denotes layer index
    #
    # | Gluon Parameter | Transformers Parameter
    # | -------------------------------------------------------------- | ----------------------
    # | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias`
    # | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight`
    # | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight`
    # | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight`
    # | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias`
    # | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight`
    # | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias`
    # | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight`
    # | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias`
    # | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight`
    # | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias`
    # | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight`
    # | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias`
    # | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight`
    # | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias`
    # | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight`
    # | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias`
    # | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight`
    # | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias`
    # | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight`
    # Helper function to convert MXNET Arrays to PyTorch
    def to_torch(a : Any ) -> nn.Parameter:
        return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy() ) )
    # Check param shapes and map new HF param back
    def check_and_map_params(a : List[Any] , a : List[str] ):
        _snake_case : Optional[Any] = hf_param.shape
        _snake_case : str = to_torch(params[gluon_param] )
        _snake_case : Union[str, Any] = gluon_param.shape
        assert (
            shape_hf == shape_gluon
        ), f'The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers'
        return gluon_param
    _snake_case : Optional[Any] = check_and_map_params(
        hf_bort_model.bert.embeddings.word_embeddings.weight , "word_embed.0.weight" )
    _snake_case : Optional[Any] = check_and_map_params(
        hf_bort_model.bert.embeddings.position_embeddings.weight , "encoder.position_weight" )
    _snake_case : List[str] = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.bias , "encoder.layer_norm.beta" )
    _snake_case : Union[str, Any] = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.weight , "encoder.layer_norm.gamma" )
    # Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them)
    _snake_case : Dict = torch.zeros_like(
        hf_bort_model.bert.embeddings.token_type_embeddings.weight.data )
    for i in range(hf_bort_config.num_hidden_layers ):
        _snake_case : BertLayer = hf_bort_model.bert.encoder.layer[i]
        # self attention
        _snake_case : BertSelfAttention = layer.attention.self
        _snake_case : int = check_and_map_params(
            self_attn.key.bias.data , f'encoder.transformer_cells.{i}.attention_cell.proj_key.bias' )
        _snake_case : Dict = check_and_map_params(
            self_attn.key.weight.data , f'encoder.transformer_cells.{i}.attention_cell.proj_key.weight' )
        _snake_case : Dict = check_and_map_params(
            self_attn.query.bias.data , f'encoder.transformer_cells.{i}.attention_cell.proj_query.bias' )
        _snake_case : str = check_and_map_params(
            self_attn.query.weight.data , f'encoder.transformer_cells.{i}.attention_cell.proj_query.weight' )
        _snake_case : Optional[int] = check_and_map_params(
            self_attn.value.bias.data , f'encoder.transformer_cells.{i}.attention_cell.proj_value.bias' )
        _snake_case : List[str] = check_and_map_params(
            self_attn.value.weight.data , f'encoder.transformer_cells.{i}.attention_cell.proj_value.weight' )
        # self attention output
        _snake_case : BertSelfOutput = layer.attention.output
        _snake_case : Any = check_and_map_params(
            self_output.dense.bias , f'encoder.transformer_cells.{i}.proj.bias' )
        _snake_case : Dict = check_and_map_params(
            self_output.dense.weight , f'encoder.transformer_cells.{i}.proj.weight' )
        _snake_case : int = check_and_map_params(
            self_output.LayerNorm.bias , f'encoder.transformer_cells.{i}.layer_norm.beta' )
        _snake_case : Any = check_and_map_params(
            self_output.LayerNorm.weight , f'encoder.transformer_cells.{i}.layer_norm.gamma' )
        # intermediate
        _snake_case : BertIntermediate = layer.intermediate
        _snake_case : int = check_and_map_params(
            intermediate.dense.bias , f'encoder.transformer_cells.{i}.ffn.ffn_1.bias' )
        _snake_case : Union[str, Any] = check_and_map_params(
            intermediate.dense.weight , f'encoder.transformer_cells.{i}.ffn.ffn_1.weight' )
        # output
        _snake_case : BertOutput = layer.output
        _snake_case : int = check_and_map_params(
            bert_output.dense.bias , f'encoder.transformer_cells.{i}.ffn.ffn_2.bias' )
        _snake_case : Dict = check_and_map_params(
            bert_output.dense.weight , f'encoder.transformer_cells.{i}.ffn.ffn_2.weight' )
        _snake_case : Any = check_and_map_params(
            bert_output.LayerNorm.bias , f'encoder.transformer_cells.{i}.ffn.layer_norm.beta' )
        _snake_case : Union[str, Any] = check_and_map_params(
            bert_output.LayerNorm.weight , f'encoder.transformer_cells.{i}.ffn.layer_norm.gamma' )
    # Save space and energy 🎄
    hf_bort_model.half()
    # Compare output of both models
    _snake_case : Optional[Any] = RobertaTokenizer.from_pretrained("roberta-base" )
    _snake_case : Any = tokenizer.encode_plus(a )["input_ids"]
    # Get gluon output
    _snake_case : Union[str, Any] = mx.nd.array([input_ids] )
    _snake_case : Optional[int] = original_bort(inputs=a , token_types=[] )
    # Get Transformer output (save and reload model again)
    hf_bort_model.save_pretrained(a )
    _snake_case : str = BertModel.from_pretrained(a )
    hf_bort_model.eval()
    _snake_case : str = tokenizer.encode_plus(a , return_tensors="pt" )
    _snake_case : Tuple = hf_bort_model(**a )[0]
    _snake_case : Any = output_gluon[0].asnumpy()
    _snake_case : Tuple = output_hf[0].detach().numpy()
    _snake_case : Optional[int] = np.max(np.abs(hf_layer - gluon_layer ) ).item()
    _snake_case : List[Any] = np.allclose(a , a , atol=1e-3 )
    if success:
        print("✔️ Both model do output the same tensors" )
    else:
        print("❌ Both model do **NOT** output the same tensors" )
        print("Absolute difference is:" , a )
if __name__ == "__main__":
    # Command-line entry point for the conversion script.
    # NOTE(review): `parser`, `args` and `convert_bort_checkpoint_to_pytorch`
    # are undefined here — the assignments were mangled into `_a` and the
    # conversion function above was renamed to `a__`; confirm intended names.
    _a : Tuple = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--bort_checkpoint_path""", default=None, type=str, required=True, help="""Path the official Bort params file."""
    )
    parser.add_argument(
        """--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
    )
    _a : Tuple = parser.parse_args()
    convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
| 87 |
"""simple docstring"""
from __future__ import annotations
from numpy import array, cos, cross, floataa, radians, sin
from numpy.typing import NDArray
def a__ ( magnitude : float , angle : float , radian_mode : bool = False ):
    """Resolve a polar force into Cartesian components ``[fx, fy]``.

    The original signature declared all three parameters as ``a`` (duplicate
    argument names are a SyntaxError); they are renamed to match the names
    the body already used (``magnitude``, ``radian_mode``).

    Args:
        magnitude: magnitude of the force.
        angle: direction of the force, in degrees by default.
        radian_mode: when True, *angle* is interpreted as radians.
    """
    if radian_mode:
        return [magnitude * cos(angle ), magnitude * sin(angle )]
    return [magnitude * cos(radians(angle ) ), magnitude * sin(radians(angle ) )]
def a__ ( forces : NDArray[floataa] , location : NDArray[floataa] , eps : float = 10**-1 ):
    """Check whether a planar force system is in rotational static equilibrium.

    The net moment of all forces about the origin must be (approximately)
    zero.  The original declared all three parameters as ``a`` (duplicate
    argument names are a SyntaxError) and called ``cross(a, a)``; since the
    result is wrapped in ``abs``, the argument order of ``cross`` does not
    affect the outcome.

    Args:
        forces: (n, 2) array of force vectors.
        location: (n, 2) array of application points, row-aligned with *forces*.
        eps: tolerance on the absolute net moment.
    """
    # Per-row 2D cross product gives each force's moment about the origin.
    moments : NDArray[floataa] = cross(location , forces )
    total_moment : float = sum(moments )
    return abs(total_moment ) < eps
if __name__ == "__main__":
    # Self-checks for the functions above, run only when executed as a script.
    # NOTE(review): `polar_force`, `in_static_equilibrium`, `forces` and
    # `location` are undefined under the scrambled names in this file (the
    # functions above are both named `a__`, and the arrays below are bound to
    # `_a`); confirm the intended upstream names before running.
    # Test to check if it works
    _a : Tuple = array(
        [
            polar_force(718.4, 180 - 30),
            polar_force(879.54, 45),
            polar_force(100, -90),
        ]
    )
    _a : NDArray[floataa] = array([[0, 0], [0, 0], [0, 0]])
    assert in_static_equilibrium(forces, location)
    # Problem 1 in image_data/2D_problems.jpg
    _a : List[Any] = array(
        [
            polar_force(30 * 9.81, 15),
            polar_force(215, 180 - 45),
            polar_force(264, 90 - 30),
        ]
    )
    _a : List[Any] = array([[0, 0], [0, 0], [0, 0]])
    assert in_static_equilibrium(forces, location)
    # Problem in image_data/2D_problems_1.jpg
    _a : List[str] = array([[0, -2_000], [0, -1_200], [0, 15_600], [0, -12_400]])
    _a : Optional[int] = array([[0, 0], [6, 0], [10, 0], [12, 0]])
    assert in_static_equilibrium(forces, location)
    import doctest
    doctest.testmod()
| 87 | 1 |
"""simple docstring"""
import unittest
from transformers import JukeboxTokenizer
from transformers.testing_utils import require_torch
class _UpperCAmelCase ( unittest.TestCase):
    """Regression tests pinning JukeboxTokenizer token ids for fixed lyrics.

    NOTE(review): both test methods below share the name ``lowerCamelCase__``,
    so the second definition shadows the first and only one test would be
    collected/run.  The metadata dict is bound to the class attribute
    ``__lowercase`` while the methods read ``self.metas`` — the attribute
    names appear mangled; confirm against the upstream test module.
    """
    __lowercase : int = JukeboxTokenizer
    __lowercase : Any = {
        """artist""": """Zac Brown Band""",
        """genres""": """Country""",
        """lyrics""": """I met a traveller from an antique land,
Who said \"Two vast and trunkless legs of stone
Stand in the desert. . . . Near them, on the sand,
Half sunk a shattered visage lies, whose frown,
And wrinkled lip, and sneer of cold command,
Tell that its sculptor well those passions read
Which yet survive, stamped on these lifeless things,
The hand that mocked them, and the heart that fed;
And on the pedestal, these words appear:
My name is Ozymandias, King of Kings;
Look on my Works, ye Mighty, and despair!
Nothing beside remains. Round the decay
Of that colossal Wreck, boundless and bare
The lone and level sands stretch far away
""",
    }
    @require_torch
    def lowerCamelCase__ ( self ):
        # Checks the 1b-lyrics checkpoint against golden token ids.
        import torch

        _snake_case : List[str] = JukeboxTokenizer.from_pretrained("openai/jukebox-1b-lyrics" )
        _snake_case : Dict = tokenizer(**self.metas )["input_ids"]
        # fmt: off
        _snake_case : Optional[int] = [
            torch.tensor([[
                0, 0, 0, 71_69, 5_07, 9, 76, 39, 31, 46, 76, 27,
                76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32,
                44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43,
                47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76,
                76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35,
                30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76,
                27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45,
                45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46,
                41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
                19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31,
                76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63,
                76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39,
                64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40,
                30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8,
                27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45,
                34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45,
                27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34,
                41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76,
                76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49,
                44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64,
                76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41,
                32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27,
                40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76,
                20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46,
                45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49,
                31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27,
                45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78,
                76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29,
                34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48,
                31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41,
                40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31,
                38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64,
                78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31,
                76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39,
                41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76,
                27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44,
                46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78,
                76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76,
                41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45,
                46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49,
                41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65,
                78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76,
                40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39,
                27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33,
                76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76,
                76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76,
                41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64,
                76, 51, 31, 76, 13, 35, 33, 34, 46, 51, 64, 76,
                27, 40, 30, 76, 30, 31, 45, 42, 27, 35, 44, 67,
                78, 76, 76, 76, 76, 76, 76, 76, 76, 14, 41, 46,
                34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76,
                44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47,
                40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51,
                78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76,
                46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27,
                38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47,
                40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28,
                27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
                20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30,
                76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45,
                76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44,
                76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76,
                76, 76]] ),
            torch.tensor([[0, 0, 0, 10_69, 11]] ),
            torch.tensor([[0, 0, 0, 10_69, 11]] ),
        ]
        # fmt: on
        self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
        self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
        self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
    @require_torch
    def lowerCamelCase__ ( self ):
        # Checks the 5b-lyrics checkpoint against golden token ids.
        # NOTE(review): this definition shadows the 1b-lyrics test above.
        import torch

        _snake_case : List[str] = JukeboxTokenizer.from_pretrained("openai/jukebox-5b-lyrics" )
        _snake_case : Dict = tokenizer(**self.metas )["input_ids"]
        # fmt: off
        _snake_case : Tuple = [
            torch.tensor([[
                0, 0, 0, 10_69, 11, -1, -1, -1, -1, 9, 77, 39,
                31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38,
                31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27,
                40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64,
                79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41,
                77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48,
                27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40,
                37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41,
                32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77,
                77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40,
                77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63,
                77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77,
                46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31,
                77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77,
                77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37,
                77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30,
                77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45,
                64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49,
                40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1,
                40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77,
                38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31,
                31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29,
                41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77,
                77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27,
                46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46,
                41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45,
                31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44,
                31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77,
                23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47,
                44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42,
                31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77,
                38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35,
                40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77,
                77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34,
                27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34,
                31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77,
                34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32,
                31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77,
                1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42,
                31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31,
                45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42,
                31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77,
                77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77,
                15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77,
                11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33,
                45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12,
                41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41,
                44, 37, 45, 64, 77, 51, 31, 77, 13, 35, 33, 34,
                46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42,
                27, 35, 44, 67, 79, 77, 77, 77, 77, 77, 77, 77,
                77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45,
                35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63,
                77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30,
                31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77,
                77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38,
                41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64,
                77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27,
                40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77,
                77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31,
                77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45,
                27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34,
                77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77,
                77, 77, 77, 77, 77, 77]] ),
            torch.tensor([[0, 0, 0, 10_69, 11, -1, -1, -1, -1]] ),
            torch.tensor([[0, 0, 0, 10_69, 11, -1, -1, -1, -1]] ),
        ]
        # fmt: on
        self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
        self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
        self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
| 87 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module logger and pretrained-config URL map.
# NOTE(review): both constants are bound to the same scrambled name `_a`, so
# the URL map shadows the logger — confirm the intended upstream names.
_a : Optional[int] = logging.get_logger(__name__)
_a : str = {"""openai-gpt""": """https://huggingface.co/openai-gpt/resolve/main/config.json"""}
class _UpperCAmelCase ( _snake_case):
    """Configuration holder for an OpenAI GPT-style model.

    The original ``__init__`` declared every parameter as ``snake_case_``
    (duplicate argument names are a SyntaxError) and bound each value to a
    throwaway annotated local, so the constructor discarded its arguments.
    The parameters are renamed to match the attributes they populate and are
    stored on ``self``.

    NOTE(review): the two class attributes below are both named
    ``__lowercase`` (upstream these are distinct: the model type and the
    attribute map), so the dict shadows the string; kept as-is here to avoid
    changing the class interface — confirm against the upstream module.
    """

    __lowercase : Optional[Any] = """openai-gpt"""
    __lowercase : Dict = {
        """max_position_embeddings""": """n_positions""",
        """hidden_size""": """n_embd""",
        """num_attention_heads""": """n_head""",
        """num_hidden_layers""": """n_layer""",
    }

    def __init__(
        self ,
        vocab_size=40478 ,
        n_positions=512 ,
        n_embd=768 ,
        n_layer=12 ,
        n_head=12 ,
        afn="gelu" ,
        resid_pdrop=0.1 ,
        embd_pdrop=0.1 ,
        attn_pdrop=0.1 ,
        layer_norm_epsilon=1E-5 ,
        initializer_range=0.02 ,
        summary_type="cls_index" ,
        summary_use_proj=True ,
        summary_activation=None ,
        summary_proj_to_labels=True ,
        summary_first_dropout=0.1 ,
        **kwargs ,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.afn = afn
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_first_dropout = summary_first_dropout
        self.summary_proj_to_labels = summary_proj_to_labels
        # Forward the remaining kwargs to the base config last, as the
        # original code did.
        super().__init__(**kwargs )
| 87 | 1 |
"""simple docstring"""
from __future__ import annotations
from math import pow, sqrt
def a__ ( resistance : float , reactance : float , impedance : float ):
    """Solve the electrical impedance triangle ``Z**2 = R**2 + X**2``.

    Exactly one of the three arguments must be 0; that unknown quantity is
    computed from the other two and returned as a one-entry dict.

    The original signature declared all three parameters as ``a`` (duplicate
    argument names are a SyntaxError) and every ``pow`` call used the
    ambiguous ``a``; the operands are restored from the Pythagorean relation
    matching each branch's result key.

    Raises:
        ValueError: if the number of zero arguments is not exactly one.
    """
    if (resistance, reactance, impedance).count(0 ) != 1:
        raise ValueError("One and only one argument must be 0" )
    if resistance == 0:
        return {"resistance": sqrt(pow(impedance , 2 ) - pow(reactance , 2 ) )}
    elif reactance == 0:
        return {"reactance": sqrt(pow(impedance , 2 ) - pow(resistance , 2 ) )}
    elif impedance == 0:
        return {"impedance": sqrt(pow(resistance , 2 ) + pow(reactance , 2 ) )}
    else:
        raise ValueError("Exactly one argument must be 0" )
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest
    doctest.testmod()
| 87 |
"""simple docstring"""
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision.transforms import functional as F
from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection
from transformers.utils import logging
logging.set_verbosity_info()
# NOTE(review): this assignment originally targeted `_a`, but the conversion
# function below calls `logger.info(...)` — restore the intended name so the
# module-level logger actually exists.
logger = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
# NOTE(review): the list was originally assigned to `_a`, yet every append below
# (and the conversion loop) uses `rename_keys` — restore the intended name.
rename_keys = []
for i in range(6):
    # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
    rename_keys.append(
        (f'transformer.encoder.layers.{i}.self_attn.out_proj.weight', f'encoder.layers.{i}.self_attn.out_proj.weight')
    )
    rename_keys.append(
        (f'transformer.encoder.layers.{i}.self_attn.out_proj.bias', f'encoder.layers.{i}.self_attn.out_proj.bias')
    )
    rename_keys.append((f'transformer.encoder.layers.{i}.linear1.weight', f'encoder.layers.{i}.fc1.weight'))
    rename_keys.append((f'transformer.encoder.layers.{i}.linear1.bias', f'encoder.layers.{i}.fc1.bias'))
    rename_keys.append((f'transformer.encoder.layers.{i}.linear2.weight', f'encoder.layers.{i}.fc2.weight'))
    rename_keys.append((f'transformer.encoder.layers.{i}.linear2.bias', f'encoder.layers.{i}.fc2.bias'))
    rename_keys.append(
        (f'transformer.encoder.layers.{i}.norm1.weight', f'encoder.layers.{i}.self_attn_layer_norm.weight')
    )
    rename_keys.append((f'transformer.encoder.layers.{i}.norm1.bias', f'encoder.layers.{i}.self_attn_layer_norm.bias'))
    rename_keys.append((f'transformer.encoder.layers.{i}.norm2.weight', f'encoder.layers.{i}.final_layer_norm.weight'))
    rename_keys.append((f'transformer.encoder.layers.{i}.norm2.bias', f'encoder.layers.{i}.final_layer_norm.bias'))
    # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
    rename_keys.append(
        (f'transformer.decoder.layers.{i}.self_attn.out_proj.weight', f'decoder.layers.{i}.self_attn.out_proj.weight')
    )
    rename_keys.append(
        (f'transformer.decoder.layers.{i}.self_attn.out_proj.bias', f'decoder.layers.{i}.self_attn.out_proj.bias')
    )
    rename_keys.append(
        (
            f'transformer.decoder.layers.{i}.multihead_attn.out_proj.weight',
            f'decoder.layers.{i}.encoder_attn.out_proj.weight',
        )
    )
    rename_keys.append(
        (
            f'transformer.decoder.layers.{i}.multihead_attn.out_proj.bias',
            f'decoder.layers.{i}.encoder_attn.out_proj.bias',
        )
    )
    rename_keys.append((f'transformer.decoder.layers.{i}.linear1.weight', f'decoder.layers.{i}.fc1.weight'))
    rename_keys.append((f'transformer.decoder.layers.{i}.linear1.bias', f'decoder.layers.{i}.fc1.bias'))
    rename_keys.append((f'transformer.decoder.layers.{i}.linear2.weight', f'decoder.layers.{i}.fc2.weight'))
    rename_keys.append((f'transformer.decoder.layers.{i}.linear2.bias', f'decoder.layers.{i}.fc2.bias'))
    rename_keys.append(
        (f'transformer.decoder.layers.{i}.norm1.weight', f'decoder.layers.{i}.self_attn_layer_norm.weight')
    )
    rename_keys.append((f'transformer.decoder.layers.{i}.norm1.bias', f'decoder.layers.{i}.self_attn_layer_norm.bias'))
    rename_keys.append(
        (f'transformer.decoder.layers.{i}.norm2.weight', f'decoder.layers.{i}.encoder_attn_layer_norm.weight')
    )
    rename_keys.append(
        (f'transformer.decoder.layers.{i}.norm2.bias', f'decoder.layers.{i}.encoder_attn_layer_norm.bias')
    )
    rename_keys.append((f'transformer.decoder.layers.{i}.norm3.weight', f'decoder.layers.{i}.final_layer_norm.weight'))
    rename_keys.append((f'transformer.decoder.layers.{i}.norm3.bias', f'decoder.layers.{i}.final_layer_norm.bias'))

# convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads
rename_keys.extend(
    [
        ("input_proj.weight", "input_projection.weight"),
        ("input_proj.bias", "input_projection.bias"),
        ("query_embed.weight", "query_position_embeddings.weight"),
        ("transformer.encoder.norm.weight", "encoder.layernorm.weight"),
        ("transformer.encoder.norm.bias", "encoder.layernorm.bias"),
        ("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
        ("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
        ("class_embed.weight", "class_labels_classifier.weight"),
        ("class_embed.bias", "class_labels_classifier.bias"),
        ("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
        ("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
        ("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
        ("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
        ("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
        ("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
    ]
)
def rename_key(state_dict, old, new):
    """Move the value stored under key `old` to key `new` in `state_dict`, in place.

    NOTE(review): the original def used three parameters all named `a`
    (a SyntaxError) and dropped the write-back; this restores the canonical
    implementation matching the `rename_key(...)` call in the conversion loop.
    """
    val = state_dict.pop(old)
    state_dict[new] = val
def rename_backbone_keys(state_dict):
    """Return a new OrderedDict with backbone keys remapped to our naming scheme.

    Keys containing "backbone.0.body" are rewritten to use
    "backbone.conv_encoder.model"; all other keys are copied unchanged.
    """
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict
def read_in_q_k_v(state_dict):
    """Split each fused q/k/v input-projection matrix into separate q/k/v keys, in place.

    PyTorch's MultiHeadAttention stores q, k and v as a single (3*256, 256)
    `in_proj_weight` / (3*256,) `in_proj_bias`; our model uses distinct
    q_proj/k_proj/v_proj parameters, so each fused tensor is popped and sliced.

    NOTE(review): the original block assigned every slice to a throwaway local
    (the `state_dict[...] = ...` targets were lost); the destination keys are
    restored following the standard DETR-style conversion.
    """
    prefix = ""
    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer
        in_proj_weight = state_dict.pop(f'{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight')
        in_proj_bias = state_dict.pop(f'{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias')
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f'encoder.layers.{i}.self_attn.q_proj.weight'] = in_proj_weight[:256, :]
        state_dict[f'encoder.layers.{i}.self_attn.q_proj.bias'] = in_proj_bias[:256]
        state_dict[f'encoder.layers.{i}.self_attn.k_proj.weight'] = in_proj_weight[256:512, :]
        state_dict[f'encoder.layers.{i}.self_attn.k_proj.bias'] = in_proj_bias[256:512]
        state_dict[f'encoder.layers.{i}.self_attn.v_proj.weight'] = in_proj_weight[-256:, :]
        state_dict[f'encoder.layers.{i}.self_attn.v_proj.bias'] = in_proj_bias[-256:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f'{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight')
        in_proj_bias = state_dict.pop(f'{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias')
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f'decoder.layers.{i}.self_attn.q_proj.weight'] = in_proj_weight[:256, :]
        state_dict[f'decoder.layers.{i}.self_attn.q_proj.bias'] = in_proj_bias[:256]
        state_dict[f'decoder.layers.{i}.self_attn.k_proj.weight'] = in_proj_weight[256:512, :]
        state_dict[f'decoder.layers.{i}.self_attn.k_proj.bias'] = in_proj_bias[256:512]
        state_dict[f'decoder.layers.{i}.self_attn.v_proj.weight'] = in_proj_weight[-256:, :]
        state_dict[f'decoder.layers.{i}.self_attn.v_proj.bias'] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            f'{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight')
        in_proj_bias_cross_attn = state_dict.pop(f'{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias')
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[f'decoder.layers.{i}.encoder_attn.q_proj.weight'] = in_proj_weight_cross_attn[:256, :]
        state_dict[f'decoder.layers.{i}.encoder_attn.q_proj.bias'] = in_proj_bias_cross_attn[:256]
        state_dict[f'decoder.layers.{i}.encoder_attn.k_proj.weight'] = in_proj_weight_cross_attn[256:512, :]
        state_dict[f'decoder.layers.{i}.encoder_attn.k_proj.bias'] = in_proj_bias_cross_attn[256:512]
        state_dict[f'decoder.layers.{i}.encoder_attn.v_proj.weight'] = in_proj_weight_cross_attn[-256:, :]
        state_dict[f'decoder.layers.{i}.encoder_attn.v_proj.bias'] = in_proj_bias_cross_attn[-256:]
def resize(image, checkpoint_url):
    """Resize `image` so its longer side is 800 px (detection checkpoints) or
    1000 px (structure-recognition checkpoints), preserving the aspect ratio.

    NOTE(review): the original def declared both parameters as `a`
    (a SyntaxError); names are restored from the body's reads
    (`image.size`, `"detection" in checkpoint_url`).
    """
    width, height = image.size
    current_max_size = max(width, height)
    target_max_size = 800 if "detection" in checkpoint_url else 1_000
    scale = target_max_size / current_max_size
    resized_image = image.resize((int(round(scale * width)), int(round(scale * height))))
    return resized_image
def normalize(image):
    """Convert a PIL image to a tensor and normalize with ImageNet mean/std.

    NOTE(review): the original body discarded each result into a throwaway
    local; the chaining (tensor -> normalized tensor -> return) is restored.
    """
    image = F.to_tensor(image)
    image = F.normalize(image, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    return image
@torch.no_grad()
def convert_table_transformer_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub):
    """
    Convert the original Table Transformer checkpoint at `checkpoint_url` into a
    TableTransformerForObjectDetection model, verify its outputs on an example
    image, and optionally save it to `pytorch_dump_folder_path` and/or push it
    to the Hugging Face hub.

    NOTE(review): the original function signature and most local assignment
    targets were mangled by obfuscation (duplicate `a` parameters, results
    dropped into throwaway locals); names below are reconstructed from the
    surviving reads — confirm against the upstream conversion script.
    """
    logger.info("Converting model..." )

    # load original state dict
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location="cpu" )

    # rename keys
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    state_dict = rename_backbone_keys(state_dict )

    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict )

    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "model."
    for key in state_dict.copy().keys():
        if not key.startswith("class_labels_classifier" ) and not key.startswith("bbox_predictor" ):
            val = state_dict.pop(key )
            state_dict[prefix + key] = val

    # create HuggingFace model and load state dict
    config = TableTransformerConfig(
        backbone="resnet18" , mask_loss_coefficient=1 , dice_loss_coefficient=1 , ce_loss_coefficient=1 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , eos_coefficient=0.4 , class_cost=1 , bbox_cost=5 , giou_cost=2 , )

    if "detection" in checkpoint_url:
        config.num_queries = 15
        config.num_labels = 2
        id2label = {0: "table", 1: "table rotated"}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    else:
        config.num_queries = 125
        config.num_labels = 6
        id2label = {
            0: "table",
            1: "table column",
            2: "table row",
            3: "table column header",
            4: "table projected row header",
            5: "table spanning cell",
        }
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    image_processor = DetrImageProcessor(
        format="coco_detection" , max_size=800 if "detection" in checkpoint_url else 1_000 )
    model = TableTransformerForObjectDetection(config )
    model.load_state_dict(state_dict )
    model.eval()

    # verify our conversion
    file_name = "example_pdf.png" if "detection" in checkpoint_url else "example_table.png"
    file_path = hf_hub_download(repo_id="nielsr/example-pdf" , repo_type="dataset" , filename=file_name )
    image = Image.open(file_path ).convert("RGB" )
    pixel_values = normalize(resize(image , checkpoint_url ) ).unsqueeze(0 )
    outputs = model(pixel_values )

    if "detection" in checkpoint_url:
        expected_shape = (1, 15, 3)
        expected_logits = torch.tensor(
            [[-6.7897, -16.9985, 6.7937], [-8.0186, -22.2192, 6.9677], [-7.3117, -21.0708, 7.4055]] )
        expected_boxes = torch.tensor([[0.4867, 0.1767, 0.6732], [0.6718, 0.4479, 0.3830], [0.4716, 0.1760, 0.6364]] )
    else:
        expected_shape = (1, 125, 7)
        expected_logits = torch.tensor(
            [[-18.1430, -8.3214, 4.8274], [-18.4685, -7.1361, -4.2667], [-26.3693, -9.3429, -4.9962]] )
        expected_boxes = torch.tensor([[0.4983, 0.5595, 0.9440], [0.4916, 0.6315, 0.5954], [0.6108, 0.8637, 0.1135]] )

    assert outputs.logits.shape == expected_shape
    assert torch.allclose(outputs.logits[0, :3, :3] , expected_logits , atol=1e-4 )
    assert torch.allclose(outputs.pred_boxes[0, :3, :3] , expected_boxes , atol=1e-4 )
    print("Looks ok!" )

    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(f'Saving PyTorch model and image processor to {pytorch_dump_folder_path}...' )
        Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
        model.save_pretrained(pytorch_dump_folder_path )
        image_processor.save_pretrained(pytorch_dump_folder_path )

    if push_to_hub:
        # Push model to HF hub
        logger.info("Pushing model to the hub..." )
        model_name = (
            "microsoft/table-transformer-detection"
            if "detection" in checkpoint_url
            else "microsoft/table-transformer-structure-recognition"
        )
        model.push_to_hub(model_name )
        image_processor.push_to_hub(model_name )
if __name__ == "__main__":
    # NOTE(review): the parser and parsed args were originally assigned to `_a`
    # while the following lines read `parser` / `args` — restore those names.
    parser = argparse.ArgumentParser()

    parser.add_argument(
        """--checkpoint_url""",
        default="""https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth""",
        type=str,
        choices=[
            """https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth""",
            """https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth""",
        ],
        help="""URL of the Table Transformer checkpoint you'd like to convert.""",
    )
    parser.add_argument(
        """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
    )
    parser.add_argument(
        """--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
    )
    args = parser.parse_args()
    convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 87 | 1 |
"""simple docstring"""
import unittest
from transformers import GPTSwaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
_a : Tuple = get_tests_dir("""fixtures/test_sentencepiece_with_bytefallback.model""")
@require_sentencepiece
@require_tokenizers
class _UpperCAmelCase ( _snake_case , unittest.TestCase):
    # Test suite for GPTSwaTokenizer (SentencePiece tokenizer with byte fallback).
    # NOTE(review): this file appears machine-obfuscated — `snake_case_` is read
    # in several places without being bound in scope (e.g. the GPTSwaTokenizer(...)
    # call in setUp presumably took the SAMPLE_VOCAB fixture path); confirm
    # against the original test module before relying on these tests.
    __lowercase : Dict = GPTSwaTokenizer
    __lowercase : List[str] = False
    __lowercase : List[Any] = True
    __lowercase : List[Any] = False
    def lowerCamelCase__ ( self ):
        # Build a tokenizer from the SentencePiece fixture and persist it so the
        # common-test machinery can reload it from tmpdirname.
        super().setUp()
        # We have a SentencePiece fixture for testing
        _snake_case : Dict = GPTSwaTokenizer(snake_case_ , eos_token="<unk>" , bos_token="<unk>" , pad_token="<unk>" )
        tokenizer.save_pretrained(self.tmpdirname )
    def lowerCamelCase__ ( self , snake_case_ ):
        # Returns an (input, expected-output) pair used by the common tests.
        _snake_case : List[Any] = "This is a test"
        _snake_case : str = "This is a test"
        return input_text, output_text
    def lowerCamelCase__ ( self ):
        # Token <-> id round-trip for the "<s>" token.
        _snake_case : Dict = "<s>"
        _snake_case : List[Any] = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(snake_case_ ) , snake_case_ )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(snake_case_ ) , snake_case_ )
    def lowerCamelCase__ ( self ):
        # Sanity-check the vocab ordering and size (2000 entries in the fixture).
        _snake_case : Dict = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , "<unk>" )
        self.assertEqual(vocab_keys[1] , "<s>" )
        self.assertEqual(vocab_keys[-1] , "j" )
        self.assertEqual(len(snake_case_ ) , 20_00 )
    def lowerCamelCase__ ( self ):
        self.assertEqual(self.get_tokenizer().vocab_size , 20_00 )
    def lowerCamelCase__ ( self ):
        # Full tokenize / ids / tokens round-trip, including byte-fallback pieces
        # such as <0x39> and <0xC3><0xA9>.
        _snake_case : List[Any] = GPTSwaTokenizer(snake_case_ )
        _snake_case : Optional[int] = tokenizer.tokenize("This is a test" )
        self.assertListEqual(snake_case_ , ["▁This", "▁is", "▁a", "▁t", "est"] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(snake_case_ ) , [4_65, 2_87, 2_65, 6_31, 8_42] )
        _snake_case : Any = tokenizer.tokenize("I was born in 92000, and this is falsé." )
        # fmt: off
        self.assertListEqual(
            snake_case_ , ["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."] , )
        # fmt: on
        _snake_case : Optional[int] = tokenizer.convert_tokens_to_ids(snake_case_ )
        self.assertListEqual(
            snake_case_ , [2_62, 2_72, 15_25, 2_86, 2_71, 2_68, 60, 9_16, 6_33, 6_33, 6_33, 2_59, 2_66, 3_01, 2_87, 3_84, 3_67, 2_63, 1_98, 1_72, 2_60] , )
        _snake_case : Optional[int] = tokenizer.convert_ids_to_tokens(snake_case_ )
        # fmt: off
        self.assertListEqual(
            snake_case_ , ["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."] )
        # fmt: on
    def lowerCamelCase__ ( self ):
        # encode_fast / decode_fast should agree with the slow tokenize path.
        _snake_case : Optional[Any] = GPTSwaTokenizer(snake_case_ )
        _snake_case : Optional[int] = ["This is a test", "I was born in 92000, and this is falsé."]
        _snake_case : List[str] = [
            [4_65, 2_87, 2_65, 6_31, 8_42],
            [2_62, 2_72, 15_25, 2_86, 2_71, 2_68, 60, 9_16, 6_33, 6_33, 6_33, 2_59, 2_66, 3_01, 2_87, 3_84, 3_67, 2_63, 1_98, 1_72, 2_60],
        ]
        # Test that encode_fast returns the same as tokenize + convert_tokens_to_ids
        for text, expected_ids in zip(snake_case_ , snake_case_ ):
            self.assertListEqual(tokenizer.encode_fast(snake_case_ ) , snake_case_ )
        # Test that decode_fast returns the input text
        for text, token_ids in zip(snake_case_ , snake_case_ ):
            self.assertEqual(tokenizer.decode_fast(snake_case_ ) , snake_case_ )
    @slow
    def lowerCamelCase__ ( self ):
        # Integration test against the published AI-Sweden/gpt-sw3-126m checkpoint.
        _snake_case : Tuple = [
            "<|python|>def fibonacci(n)\n if n < 0:\n print('Incorrect input')",
            "Hey there, how are you doing this fine day?",
            "This is a text with a trailing spaces followed by a dot .",
            "Häj sväjs lillebrör! =)",
            "Det är inget fel på Mr. Cool",
        ]
        # fmt: off
        _snake_case : Optional[Any] = {"input_ids": [[6_34_23, 5, 68_11, 1_49_54, 2_82, 8_16, 38_21, 6_34_66, 6_34_25, 6_34_62, 18, 6_39_78, 6_78, 3_01, 13_20, 6_34_23, 6_34_55, 6_34_58, 18, 6_39_82, 42_46, 39_40, 19_01, 4_77_89, 55_47, 1_89_94], [1_96_30, 11_00, 6_34_46, 13_42, 6_33, 5_44, 44_88, 5_93, 51_02, 24_16, 6_34_95, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [16_52, 4_28, 2_68, 19_36, 5_15, 2_68, 5_85_93, 2_24_13, 91_06, 5_46, 2_68, 3_32_13, 6_39_79, 6_98, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_51_30, 6_34_50, 9_24, 6_34_49, 22_49, 40_62, 15_58, 3_18, 6_35_04, 2_14_98, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_09, 3_77, 28_27, 25_59, 3_32, 65_75, 6_34_43, 2_68_01, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=snake_case_ , model_name="AI-Sweden/gpt-sw3-126m" , sequences=snake_case_ , )
| 87 |
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
# Guarded import: the UnCLIP pipelines need both PyTorch and transformers>=4.25.0;
# when either is missing, fall back to the dummy objects that raise a helpful
# error on instantiation.
try:
    if not (is_transformers_available() and is_torch_available() and is_transformers_version(""">=""", """4.25.0""")):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Dummy placeholders keep the public names importable without the deps.
    from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
    from .pipeline_unclip import UnCLIPPipeline
    from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
    from .text_proj import UnCLIPTextProjModel
| 87 | 1 |
"""simple docstring"""
from __future__ import annotations
import random
import unittest
from transformers import TransfoXLConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLModel,
)
class _UpperCAmelCase :
    # Helper that builds small TransfoXL configs/inputs and runs per-architecture
    # checks for the TF TransfoXL test suite.
    # NOTE(review): this file appears machine-obfuscated — locals were renamed to
    # `_snake_case` while the reads keep original names (parent, self.seq_length,
    # input_ids_a, ...), and some defs repeat the parameter name `snake_case_`;
    # confirm against the original test module before relying on it.
    def __init__( self , snake_case_ , ):
        # Hyper-parameters for a deliberately tiny model (fast tests).
        _snake_case : Any = parent
        _snake_case : int = 13
        _snake_case : List[str] = 7
        _snake_case : int = 30
        _snake_case : List[str] = self.seq_length + self.mem_len
        _snake_case : Union[str, Any] = 15
        _snake_case : Dict = True
        _snake_case : Optional[int] = True
        _snake_case : Optional[int] = 99
        _snake_case : Dict = [10, 50, 80]
        _snake_case : Dict = 32
        _snake_case : Dict = 32
        _snake_case : int = 4
        _snake_case : int = 8
        _snake_case : Any = 1_28
        _snake_case : Dict = 2
        _snake_case : Any = 2
        _snake_case : Tuple = None
        _snake_case : str = 1
        _snake_case : Union[str, Any] = 0
        _snake_case : str = 3
        _snake_case : List[Any] = self.vocab_size - 1
        _snake_case : Tuple = 0.01
    def lowerCamelCase__ ( self ):
        # Build random input ids (two sequences), optional labels, and a config.
        _snake_case : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        _snake_case : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        _snake_case : Any = None
        if self.use_labels:
            _snake_case : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        _snake_case : Dict = TransfoXLConfig(
            vocab_size=self.vocab_size , mem_len=self.mem_len , clamp_len=self.clamp_len , cutoffs=self.cutoffs , d_model=self.hidden_size , d_embed=self.d_embed , n_head=self.num_attention_heads , d_head=self.d_head , d_inner=self.d_inner , div_val=self.div_val , n_layer=self.num_hidden_layers , eos_token_id=self.eos_token_id , pad_token_id=self.vocab_size - 1 , init_range=self.init_range , num_labels=self.num_labels , )
        return (config, input_ids_a, input_ids_a, lm_labels)
    def lowerCamelCase__ ( self ):
        # Fix both Python and TF RNG seeds for reproducible checks.
        random.seed(self.seed )
        tf.random.set_seed(self.seed )
    def lowerCamelCase__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ ):
        # Base model: check hidden-state shapes and the shapes of returned mems.
        _snake_case : Union[str, Any] = TFTransfoXLModel(snake_case_ )
        _snake_case , _snake_case : Optional[int] = model(snake_case_ ).to_tuple()
        _snake_case : Any = {"input_ids": input_ids_a, "mems": mems_a}
        _snake_case , _snake_case : Optional[int] = model(snake_case_ ).to_tuple()
        self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertListEqual(
            [mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
        self.parent.assertListEqual(
            [mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
    def lowerCamelCase__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ ):
        # LM head: check logits shape and mems shapes across chained calls.
        _snake_case : Any = TFTransfoXLLMHeadModel(snake_case_ )
        _snake_case , _snake_case : str = model(snake_case_ ).to_tuple()
        _snake_case : List[Any] = {"input_ids": input_ids_a, "labels": lm_labels}
        _snake_case , _snake_case : str = model(snake_case_ ).to_tuple()
        _snake_case , _snake_case : Dict = model([input_ids_a, mems_a] ).to_tuple()
        _snake_case : Optional[Any] = {"input_ids": input_ids_a, "mems": mems_a, "labels": lm_labels}
        _snake_case , _snake_case : Optional[Any] = model(snake_case_ ).to_tuple()
        self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) )
        self.parent.assertListEqual(
            [mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
        self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) )
        self.parent.assertListEqual(
            [mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
    def lowerCamelCase__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ ):
        # Sequence classification head: logits are (batch_size, num_labels).
        _snake_case : int = TFTransfoXLForSequenceClassification(snake_case_ )
        _snake_case : Any = model(snake_case_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def lowerCamelCase__ ( self ):
        # Adapts prepare_config_and_inputs() to the common-test dict format.
        _snake_case : Optional[int] = self.prepare_config_and_inputs()
        ((_snake_case) , (_snake_case) , (_snake_case) , (_snake_case)) : Optional[int] = config_and_inputs
        _snake_case : Union[str, Any] = {"input_ids": input_ids_a}
        return config, inputs_dict
@require_tf
class _UpperCAmelCase ( _snake_case , _snake_case , unittest.TestCase):
    # Common-suite test class wiring the tester above into the shared
    # TFModelTesterMixin / PipelineTesterMixin machinery.
    # NOTE(review): obfuscation artefacts as in the tester class above; the
    # duplicated `snake_case_` parameter names in some defs are invalid Python —
    # restore the original parameter lists before running.
    __lowercase : Optional[int] = (
        (TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else ()
    )
    __lowercase : int = () if is_tf_available() else ()
    __lowercase : List[str] = (
        {
            """feature-extraction""": TFTransfoXLModel,
            """text-classification""": TFTransfoXLForSequenceClassification,
            """text-generation""": TFTransfoXLLMHeadModel,
            """zero-shot""": TFTransfoXLForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    # TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented
    __lowercase : int = False
    __lowercase : Tuple = False
    __lowercase : str = False
    __lowercase : Union[str, Any] = False
    def lowerCamelCase__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ):
        # Skip pipeline tests that are known to fail for TransfoXL.
        if pipeline_test_casse_name == "TextGenerationPipelineTests":
            # Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`.
            # `TransfoXLConfig` was never used in pipeline tests: cannot create a simple
            # tokenizer.
            return True
        return False
    def lowerCamelCase__ ( self ):
        _snake_case : Dict = TFTransfoXLModelTester(self )
        _snake_case : Dict = ConfigTester(self , config_class=snake_case_ , d_embed=37 )
    def lowerCamelCase__ ( self ):
        self.config_tester.run_common_tests()
    def lowerCamelCase__ ( self ):
        self.model_tester.set_seed()
        _snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_model(*snake_case_ )
    def lowerCamelCase__ ( self ):
        self.model_tester.set_seed()
        _snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_lm_head(*snake_case_ )
    def lowerCamelCase__ ( self ):
        _snake_case : str = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*snake_case_ )
    def lowerCamelCase__ ( self ):
        # Check input/output embedding objects exposed by each architecture.
        _snake_case , _snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        _snake_case : Optional[int] = [TFTransfoXLForSequenceClassification]
        for model_class in self.all_model_classes:
            _snake_case : Dict = model_class(snake_case_ )
            assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer )
            if model_class in list_other_models_with_output_ebd:
                _snake_case : Union[str, Any] = model.get_output_embeddings()
                assert isinstance(snake_case_ , tf.keras.layers.Layer )
                _snake_case : Dict = model.get_bias()
                assert name is None
            else:
                _snake_case : str = model.get_output_embeddings()
                assert x is None
                _snake_case : Dict = model.get_bias()
                assert name is None
    def lowerCamelCase__ ( self ):
        # TODO JP: Make TransfoXL XLA compliant
        pass
    @slow
    def lowerCamelCase__ ( self ):
        # Smoke-test loading a published checkpoint.
        for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            _snake_case : List[Any] = TFTransfoXLModel.from_pretrained(snake_case_ )
            self.assertIsNotNone(snake_case_ )
    @unittest.skip(reason="This model doesn't play well with fit() due to not returning a single loss." )
    def lowerCamelCase__ ( self ):
        pass
@require_tf
class _UpperCAmelCase ( unittest.TestCase):
    # Slow integration test: greedy generation with the pretrained
    # transfo-xl-wt103 checkpoint must reproduce a fixed token sequence.
    @unittest.skip("Skip test until #12651 is resolved." )
    @slow
    def lowerCamelCase__ ( self ):
        _snake_case : Optional[int] = TFTransfoXLLMHeadModel.from_pretrained("transfo-xl-wt103" )
        # fmt: off
        _snake_case : str = tf.convert_to_tensor([[33,12_97,2,1,10_09,4,11_09,1_17_39,47_62,3_58,5,25,2_45,22,17_06,17,2_00_98,5,32_15,21,37,11_10,3,13,10_41,4,24,6_03,4_90,2,7_14_77,2_00_98,10_44_47,2,2_09_61,1,26_04,4,1,3_29,3,62_24,8_31,1_60_02,2,8,6_03,7_89_67,2_95_46,23,8_03,20,25,4_16,5,8,2_32,4,2_77,6,18_55,46_01,3,2_95_46,54,8,36_09,5,5_72_11,49,4,1,2_77,18,8,17_55,1_56_91,3,3_41,25,4_16,6_93,4_25_73,71,17,4_01,94,31,1_79_19,2,2_95_46,78_73,18,1,4_35,23,1_10_11,7_55,5,51_67,3,79_83,98,84,2,2_95_46,32_67,8,36_09,4,1,48_65,10_75,2,60_87,71,6,3_46,8,58_54,3,2_95_46,8_24,14_00,18_68,2,19,1_60,2,3_11,8,54_96,2,2_09_20,17,25,1_50_97,3,24,24,0]] , dtype=tf.intaa ) # noqa: E231
        # fmt: on
        # In 1991 , the remains of Russian Tsar Nicholas II and his family
        # ( except for Alexei and Maria ) are discovered .
        # The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the
        # remainder of the story . 1883 Western Siberia ,
        # a young Grigori Rasputin is asked by his father and a group of men to perform magic .
        # Rasputin has a vision and denounces one of the men as a horse thief . Although his
        # father initially slaps him for making such an accusation , Rasputin watches as the
        # man is chased outside and beaten . Twenty years later , Rasputin sees a vision of
        # the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous ,
        # with people , even a bishop , begging for his blessing . <eod> </s> <eos>
        # fmt: off
        _snake_case : List[Any] = [33,12_97,2,1,10_09,4,11_09,1_17_39,47_62,3_58,5,25,2_45,22,17_06,17,2_00_98,5,32_15,21,37,11_10,3,13,10_41,4,24,6_03,4_90,2,7_14_77,2_00_98,10_44_47,2,2_09_61,1,26_04,4,1,3_29,3,62_24,8_31,1_60_02,2,8,6_03,7_89_67,2_95_46,23,8_03,20,25,4_16,5,8,2_32,4,2_77,6,18_55,46_01,3,2_95_46,54,8,36_09,5,5_72_11,49,4,1,2_77,18,8,17_55,1_56_91,3,3_41,25,4_16,6_93,4_25_73,71,17,4_01,94,31,1_79_19,2,2_95_46,78_73,18,1,4_35,23,1_10_11,7_55,5,51_67,3,79_83,98,84,2,2_95_46,32_67,8,36_09,4,1,48_65,10_75,2,60_87,71,6,3_46,8,58_54,3,2_95_46,8_24,14_00,18_68,2,19,1_60,2,3_11,8,54_96,2,2_09_20,17,25,1_50_97,3,24,24,0,33,1,18_57,2,1,10_09,4,11_09,1_17_39,47_62,3_58,5,25,2_45,28,11_10,3,13,10_41,4,24,6_03,4_90,2,7_14_77,2_00_98,10_44_47,2,2_09_61,1,26_04,4,1,3_29,3,0] # noqa: E231
        # fmt: on
        # In 1991, the remains of Russian Tsar Nicholas II and his family (
        # except for Alexei and Maria ) are discovered. The voice of young son,
        # Tsarevich Alexei Nikolaevich, narrates the remainder of the story.
        # 1883 Western Siberia, a young Grigori Rasputin is asked by his father
        # and a group of men to perform magic. Rasputin has a vision and
        # denounces one of the men as a horse thief. Although his father initially
        # slaps him for making such an accusation, Rasputin watches as the man
        # is chased outside and beaten. Twenty years later, Rasputin sees a vision
        # of the Virgin Mary, prompting him to become a priest.
        # Rasputin quickly becomes famous, with people, even a bishop, begging for
        # his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar
        # Nicholas II and his family were discovered. The voice of <unk> young son,
        # Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos>
        # Greedy (do_sample disabled) generation must match the reference ids.
        _snake_case : Dict = model.generate(snake_case_ , max_length=2_00 , do_sample=snake_case_ )
        self.assertListEqual(output_ids[0].numpy().tolist() , snake_case_ )
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
_a : Optional[int] = logging.get_logger(__name__)
_a : List[str] = {
"""facebook/convnextv2-tiny-1k-224""": """https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json""",
}
class ConvNextV2Config(BackboneConfigMixin, PretrainedConfig):
    """Configuration for a ConvNeXt V2 model.

    NOTE(review): the original class declared all ``__init__`` parameters as
    ``snake_case_`` (a SyntaxError) and duplicated its base class; the parameter
    names below are restored from the attribute reads in the body, and the bases
    from the imports at the top of this module.

    Args:
        num_channels: number of input image channels.
        patch_size: patch size used by the patchify stem.
        num_stages: number of stages in the encoder.
        hidden_sizes: per-stage hidden sizes (defaults to [96, 192, 384, 768]).
        depths: per-stage block counts (defaults to [3, 3, 9, 3]).
        hidden_act: activation function name.
        initializer_range: std of the weight-init truncated normal.
        layer_norm_eps: epsilon used by layer norms.
        drop_path_rate: stochastic-depth rate.
        image_size: expected input image size.
        out_features / out_indices: which stages a backbone exposes.
    """

    model_type = "convnextv2"

    def __init__(
        self,
        num_channels=3,
        patch_size=4,
        num_stages=4,
        hidden_sizes=None,
        depths=None,
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        drop_path_rate=0.0,
        image_size=224,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_stages = num_stages
        self.hidden_sizes = [96, 192, 384, 768] if hidden_sizes is None else hidden_sizes
        self.depths = [3, 3, 9, 3] if depths is None else depths
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.drop_path_rate = drop_path_rate
        self.image_size = image_size
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
        # Validate/align the backbone outputs against the stage names.
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
| 87 | 1 |
"""simple docstring"""
import numpy as np
import torch
from torch.utils.data import Dataset, IterableDataset
from ..utils.generic import ModelOutput
class PipelineDataset(Dataset):
    """Map-style dataset that lazily applies `process` (with fixed keyword
    `params`) to each element of the wrapped `dataset`.

    NOTE(review): the original def listed three parameters all named
    ``snake_case_`` (a SyntaxError); names are restored from the attribute
    reads in ``__len__``/``__getitem__``.
    """

    def __init__(self, dataset, process, params):
        self.dataset = dataset
        self.process = process
        self.params = params

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        item = self.dataset[i]
        processed = self.process(item, **self.params)
        return processed
class _UpperCAmelCase ( _snake_case):
def __init__( self , snake_case_ , snake_case_ , snake_case_ , snake_case_=None ):
_snake_case : Union[str, Any] = loader
_snake_case : Tuple = infer
_snake_case : List[Any] = params
if loader_batch_size == 1:
# Let's spare some time by deactivating altogether
_snake_case : int = None
_snake_case : int = loader_batch_size
# Internal bookkeeping
_snake_case : Any = None
_snake_case : Dict = None
def __len__( self ):
return len(self.loader )
def __iter__( self ):
_snake_case : int = iter(self.loader )
return self
def lowerCamelCase__ ( self ):
if isinstance(self._loader_batch_data , torch.Tensor ):
# Batch data is simple tensor, just fetch the slice
_snake_case : List[Any] = self._loader_batch_data[self._loader_batch_index]
else:
# Batch data is assumed to be BaseModelOutput (or dict)
_snake_case : int = {}
for k, element in self._loader_batch_data.items():
if isinstance(snake_case_ , snake_case_ ):
# Convert ModelOutput to tuple first
_snake_case : Tuple = element.to_tuple()
if isinstance(element[0] , torch.Tensor ):
_snake_case : Union[str, Any] = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element )
elif isinstance(element[0] , np.ndarray ):
_snake_case : int = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element )
continue
if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(snake_case_ , snake_case_ ):
# Those are stored as lists of tensors so need specific unbatching.
if isinstance(element[0] , torch.Tensor ):
_snake_case : Tuple = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element )
elif isinstance(element[0] , np.ndarray ):
_snake_case : Tuple = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element )
continue
if element is None:
# This can happen for optional data that get passed around
_snake_case : Tuple = None
elif isinstance(element[self._loader_batch_index] , torch.Tensor ):
# Take correct batch data, but make it looked like batch_size=1
# For compatibility with other methods within transformers
_snake_case : List[Any] = element[self._loader_batch_index].unsqueeze(0 )
elif isinstance(element[self._loader_batch_index] , np.ndarray ):
# Take correct batch data, but make it looked like batch_size=1
# For compatibility with other methods within transformers
_snake_case : Union[str, Any] = np.expand_dims(element[self._loader_batch_index] , 0 )
else:
# This is typically a list, so no need to `unsqueeze`.
_snake_case : List[Any] = element[self._loader_batch_index]
# Recreate the element by reusing the original class to make it look
# batch_size=1
_snake_case : int = self._loader_batch_data.__class__(snake_case_ )
self._loader_batch_index += 1
return result
def lowerCamelCase__ ( self ):
if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
# We are currently unrolling a batch so we just need to return
# the current item within a batch
return self.loader_batch_item()
# We're out of items within a batch
_snake_case : Tuple = next(self.iterator )
_snake_case : Any = self.infer(snake_case_ , **self.params )
# We now have a batch of "inferred things".
if self.loader_batch_size is not None:
# Try to infer the size of the batch
if isinstance(snake_case_ , torch.Tensor ):
_snake_case : Union[str, Any] = processed
else:
_snake_case : Optional[int] = list(processed.keys() )[0]
_snake_case : List[str] = processed[key]
if isinstance(snake_case_ , snake_case_ ):
_snake_case : Dict = len(snake_case_ )
else:
_snake_case : Optional[int] = first_tensor.shape[0]
if 0 < observed_batch_size < self.loader_batch_size:
# could be last batch so we can't unroll as many
# elements.
_snake_case : Union[str, Any] = observed_batch_size
# Setting internal index to unwrap the batch
_snake_case : str = processed
_snake_case : List[Any] = 0
return self.loader_batch_item()
else:
# We're not unrolling batches
return processed
class _UpperCAmelCase ( _snake_case):
def __init__( self , snake_case_ , snake_case_ , snake_case_ , snake_case_=None ):
super().__init__(snake_case_ , snake_case_ , snake_case_ )
def __iter__( self ):
_snake_case : Tuple = iter(self.loader )
_snake_case : List[Any] = None
return self
def lowerCamelCase__ ( self ):
if self.subiterator is None:
_snake_case : Optional[Any] = self.infer(next(self.iterator ) , **self.params )
try:
# Try to return next item
_snake_case : Union[str, Any] = next(self.subiterator )
except StopIteration:
# When a preprocess iterator ends, we can start lookig at the next item
# ChunkIterator will keep feeding until ALL elements of iterator
# all have created their subiterator and have been iterating against.
#
# Another way to look at it, is we're basically flattening lists of lists
# into a single list, but with generators
_snake_case : str = self.infer(next(self.iterator ) , **self.params )
_snake_case : Tuple = next(self.subiterator )
return processed
class _UpperCAmelCase ( _snake_case):
def __iter__( self ):
_snake_case : Optional[Any] = iter(self.loader )
return self
def lowerCamelCase__ ( self ):
# Extremely similar to PipelineIterator in its unpacking mechanism
# BUT, we have an extra required item which is the presence of `is_last`
# That is because everything is flattened by `PipelineChunkIterator` we
# need to keep track of how to regroup here in the original `process`
# boundaries so that `process` and `postprocess` see the same data.
# This iterator accumulates items (possibly while unbatching) until it
# its a `is_last` and then just passes it on to the caller.
_snake_case : Optional[Any] = False
_snake_case : Tuple = []
if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
while self._loader_batch_index < self.loader_batch_size:
_snake_case : Union[str, Any] = self.loader_batch_item()
_snake_case : str = item.pop("is_last" )
accumulator.append(snake_case_ )
if is_last:
return accumulator
while not is_last:
_snake_case : List[str] = self.infer(next(self.iterator ) , **self.params )
if self.loader_batch_size is not None:
if isinstance(snake_case_ , torch.Tensor ):
_snake_case : Union[str, Any] = processed
else:
_snake_case : Tuple = list(processed.keys() )[0]
_snake_case : Tuple = processed[key]
if isinstance(snake_case_ , snake_case_ ):
_snake_case : Any = len(snake_case_ )
else:
_snake_case : List[Any] = first_tensor.shape[0]
if 0 < observed_batch_size < self.loader_batch_size:
# could be last batch so we can't unroll as many
# elements.
_snake_case : Dict = observed_batch_size
_snake_case : List[Any] = processed
_snake_case : List[str] = 0
while self._loader_batch_index < self.loader_batch_size:
_snake_case : Union[str, Any] = self.loader_batch_item()
_snake_case : int = item.pop("is_last" )
accumulator.append(snake_case_ )
if is_last:
return accumulator
else:
_snake_case : Dict = processed
_snake_case : Dict = item.pop("is_last" )
accumulator.append(snake_case_ )
return accumulator
class _UpperCAmelCase ( _snake_case):
def __init__( self , snake_case_ , snake_case_ ):
_snake_case : str = dataset
_snake_case : Any = key
def __len__( self ):
return len(self.dataset )
def __getitem__( self , snake_case_ ):
return self.dataset[i][self.key]
class _UpperCAmelCase ( _snake_case):
def __init__( self , snake_case_ , snake_case_ , snake_case_ ):
_snake_case : int = dataset
_snake_case : Any = keya
_snake_case : int = keya
def __len__( self ):
return len(self.dataset )
def __getitem__( self , snake_case_ ):
return {"text": self.dataset[i][self.keya], "text_pair": self.dataset[i][self.keya]}
| 87 |
"""simple docstring"""
from argparse import ArgumentParser, Namespace
from ..utils import logging
from . import BaseTransformersCLICommand
def a__ ( a : Namespace ):
"""simple docstring"""
return ConvertCommand(
args.model_type , args.tf_checkpoint , args.pytorch_dump_output , args.config , args.finetuning_task_name )
_a : int = """
transformers can only be used from the commandline to convert TensorFlow models in PyTorch, In that case, it requires
TensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.
"""
class _UpperCAmelCase ( _snake_case):
@staticmethod
def lowerCamelCase__ ( snake_case_ ):
_snake_case : Dict = parser.add_parser(
"convert" , help="CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints." , )
train_parser.add_argument("--model_type" , type=snake_case_ , required=snake_case_ , help="Model's type." )
train_parser.add_argument(
"--tf_checkpoint" , type=snake_case_ , required=snake_case_ , help="TensorFlow checkpoint path or folder." )
train_parser.add_argument(
"--pytorch_dump_output" , type=snake_case_ , required=snake_case_ , help="Path to the PyTorch saved model output." )
train_parser.add_argument("--config" , type=snake_case_ , default="" , help="Configuration file path or folder." )
train_parser.add_argument(
"--finetuning_task_name" , type=snake_case_ , default=snake_case_ , help="Optional fine-tuning task name if the TF model was a finetuned model." , )
train_parser.set_defaults(func=snake_case_ )
def __init__( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , *snake_case_ , ):
_snake_case : str = logging.get_logger("transformers-cli/converting" )
self._logger.info(F'Loading model {model_type}' )
_snake_case : Optional[int] = model_type
_snake_case : Any = tf_checkpoint
_snake_case : Optional[int] = pytorch_dump_output
_snake_case : Tuple = config
_snake_case : Tuple = finetuning_task_name
def lowerCamelCase__ ( self ):
if self._model_type == "albert":
try:
from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(snake_case_ )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "bert":
try:
from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(snake_case_ )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "funnel":
try:
from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(snake_case_ )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "t5":
try:
from ..models.ta.convert_ta_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
except ImportError:
raise ImportError(snake_case_ )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "gpt":
from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import (
convert_openai_checkpoint_to_pytorch,
)
convert_openai_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "transfo_xl":
try:
from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import (
convert_transfo_xl_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(snake_case_ )
if "ckpt" in self._tf_checkpoint.lower():
_snake_case : int = self._tf_checkpoint
_snake_case : Optional[Any] = ""
else:
_snake_case : Optional[int] = self._tf_checkpoint
_snake_case : List[str] = ""
convert_transfo_xl_checkpoint_to_pytorch(
snake_case_ , self._config , self._pytorch_dump_output , snake_case_ )
elif self._model_type == "gpt2":
try:
from ..models.gpta.convert_gpta_original_tf_checkpoint_to_pytorch import (
convert_gpta_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(snake_case_ )
convert_gpta_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "xlnet":
try:
from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import (
convert_xlnet_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(snake_case_ )
convert_xlnet_checkpoint_to_pytorch(
self._tf_checkpoint , self._config , self._pytorch_dump_output , self._finetuning_task_name )
elif self._model_type == "xlm":
from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import (
convert_xlm_checkpoint_to_pytorch,
)
convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "lxmert":
from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import (
convert_lxmert_checkpoint_to_pytorch,
)
convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "rembert":
from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import (
convert_rembert_tf_checkpoint_to_pytorch,
)
convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
else:
raise ValueError(
"--model_type should be selected in the list [bert, gpt, gpt2, t5, transfo_xl, xlnet, xlm, lxmert]" )
| 87 | 1 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class _UpperCAmelCase ( _snake_case):
__lowercase : Dict = """naver-clova-ix/donut-base-finetuned-docvqa"""
__lowercase : List[Any] = (
"""This is a tool that answers a question about an document (pdf). It takes an input named `document` which """
"""should be the document containing the information, as well as a `question` that is the question about the """
"""document. It returns a text that contains the answer to the question."""
)
__lowercase : int = """document_qa"""
__lowercase : Optional[Any] = AutoProcessor
__lowercase : List[str] = VisionEncoderDecoderModel
__lowercase : Union[str, Any] = ["""image""", """text"""]
__lowercase : List[Any] = ["""text"""]
def __init__( self , *snake_case_ , **snake_case_ ):
if not is_vision_available():
raise ValueError("Pillow must be installed to use the DocumentQuestionAnsweringTool." )
super().__init__(*snake_case_ , **snake_case_ )
def lowerCamelCase__ ( self , snake_case_ , snake_case_ ):
_snake_case : Optional[Any] = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
_snake_case : Tuple = task_prompt.replace("{user_input}" , snake_case_ )
_snake_case : Any = self.pre_processor.tokenizer(
snake_case_ , add_special_tokens=snake_case_ , return_tensors="pt" ).input_ids
_snake_case : Optional[int] = self.pre_processor(snake_case_ , return_tensors="pt" ).pixel_values
return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}
def lowerCamelCase__ ( self , snake_case_ ):
return self.model.generate(
inputs["pixel_values"].to(self.device ) , decoder_input_ids=inputs["decoder_input_ids"].to(self.device ) , max_length=self.model.decoder.config.max_position_embeddings , early_stopping=snake_case_ , pad_token_id=self.pre_processor.tokenizer.pad_token_id , eos_token_id=self.pre_processor.tokenizer.eos_token_id , use_cache=snake_case_ , num_beams=1 , bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]] , return_dict_in_generate=snake_case_ , ).sequences
def lowerCamelCase__ ( self , snake_case_ ):
_snake_case : Tuple = self.pre_processor.batch_decode(snake_case_ )[0]
_snake_case : Any = sequence.replace(self.pre_processor.tokenizer.eos_token , "" )
_snake_case : Optional[Any] = sequence.replace(self.pre_processor.tokenizer.pad_token , "" )
_snake_case : Any = re.sub(r"<.*?>" , "" , snake_case_ , count=1 ).strip() # remove first task start token
_snake_case : Optional[Any] = self.pre_processor.tokenajson(snake_case_ )
return sequence["answer"]
| 87 |
"""simple docstring"""
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def a__ ( a : List[str] , a : Any ):
"""simple docstring"""
if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
# expert layer
_snake_case : Any = flax_key_tuple[:-1] + ("weight",)
_snake_case : str = torch.permute(a , (0, 2, 1) )
elif flax_key_tuple[-1] == "kernel" and ".".join(a ):
# linear layer
_snake_case : Optional[int] = flax_key_tuple[:-1] + ("weight",)
_snake_case : Any = flax_tensor.T
elif flax_key_tuple[-1] in ["scale", "embedding"]:
_snake_case : Union[str, Any] = flax_key_tuple[:-1] + ("weight",)
return flax_key_tuple, flax_tensor
def a__ ( a : List[Any] , a : Union[str, Any] , a : List[str] ):
"""simple docstring"""
if "metadata" in layer:
_snake_case : Optional[int] = layer.split("metadata" )
_snake_case : Optional[int] = "".join(split_layer[0] )[:-1]
_snake_case : int = [tuple(("metadata" + split_layer[1]).split("/" ) )]
elif "kvstore" in layer:
_snake_case : Any = layer.split("kvstore" )
_snake_case : str = "".join(split_layer[0] )[:-1]
_snake_case : Any = [tuple(("kvstore" + split_layer[1]).split("/" ) )]
else:
_snake_case : List[Any] = layer.split("/" )
_snake_case : Tuple = "/".join(split_layer[:-1] )
_snake_case : int = (split_layer[-1],)
if "kvstore/path" in layer:
_snake_case : Optional[Any] = f'{switch_checkpoint_path}/{checkpoint_info[layer]}'
elif "kvstore/driver" in layer:
_snake_case : Tuple = "file"
else:
_snake_case : Optional[int] = checkpoint_info[layer]
return curr_real_layer_name, split_layer, content
def a__ ( a : List[Any] , a : List[Any] ):
"""simple docstring"""
_snake_case : Union[str, Any] = rename_keys(a )
_snake_case : int = {}
for k, v in current_block.items():
_snake_case : Optional[int] = v
_snake_case : Optional[int] = new_current_block
torch.save(a , a )
def a__ ( a : Dict , a : Tuple , a : List[str] , a : int , a : str = WEIGHTS_NAME ):
"""simple docstring"""
_snake_case : Any = convert_file_size_to_int(a )
_snake_case : Tuple = []
_snake_case : Optional[int] = {}
_snake_case : Tuple = 0
_snake_case : Optional[Any] = 0
os.makedirs(a , exist_ok=a )
with gfile.GFile(switch_checkpoint_path + "/checkpoint" , "rb" ) as fp:
_snake_case : Any = serialization.msgpack_restore(fp.read() )["optimizer"]["target"]
_snake_case : Optional[Any] = flatten_dict(a , sep="/" )
_snake_case : Optional[Any] = {}
for layer in checkpoint_info.keys():
_snake_case , _snake_case , _snake_case : int = get_key_and_tensorstore_dict(
a , a , a )
if curr_real_layer_name in all_layers:
_snake_case : Dict = content
else:
_snake_case : Tuple = {split_layer[-1]: content}
for key in all_layers.keys():
# open tensorstore file
_snake_case : List[str] = ts.open(unflatten_dict(all_layers[key] ) ).result().read().result()
_snake_case : Dict = torch.tensor(a )
_snake_case : Dict = raw_weights.numel() * dtype_byte_size(raw_weights.dtype )
# use the renaming pattern from the small conversion scripts
_snake_case , _snake_case : Optional[int] = rename_base_flax_keys(tuple(key.split("/" ) ) , a )
_snake_case : Optional[Any] = "/".join(a )
# If this weight is going to tip up over the maximal size, we split.
if current_block_size + weight_size > max_shard_size:
_snake_case : Any = os.path.join(
a , weights_name.replace(".bin" , f'-{len(a )+1:05d}-of-???.bin' ) )
rename_and_save_block(a , a )
sharded_state_dicts.append(current_block.keys() )
del current_block
_snake_case : List[Any] = {}
_snake_case : str = 0
_snake_case : List[str] = raw_weights.to(getattr(a , a ) )
current_block_size += weight_size
total_size += weight_size
# Add the last block
_snake_case : int = os.path.join(a , weights_name.replace(".bin" , f'-{len(a )+1:05d}-of-???.bin' ) )
rename_and_save_block(a , a )
sharded_state_dicts.append(current_block.keys() )
# If we only have one shard, we return it
if len(a ) == 1:
return {weights_name: sharded_state_dicts[0]}, None
# Otherwise, let's build the index
_snake_case : str = {}
_snake_case : Any = {}
for idx, shard in enumerate(a ):
_snake_case : Optional[int] = weights_name.replace(
".bin" , f'-{idx+1:05d}-of-{len(a ):05d}.bin' ) # len(sharded_state_dicts):05d}
_snake_case : Dict = os.path.join(a , weights_name.replace(".bin" , f'-{idx+1:05d}-of-???.bin' ) )
os.rename(a , os.path.join(a , a ) )
_snake_case : Dict = shard
for key in shard:
_snake_case : int = shard_file
# Add the metadata
_snake_case : List[Any] = {"total_size": total_size}
_snake_case : Any = {"metadata": metadata, "weight_map": weight_map}
with open(os.path.join(a , a ) , "w" , encoding="utf-8" ) as f:
_snake_case : Union[str, Any] = json.dumps(a , indent=2 , sort_keys=a ) + "\n"
f.write(a )
return metadata, index
if __name__ == "__main__":
_a : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--switch_t5x_checkpoint_path""",
default="""/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600""",
type=str,
required=False,
help="""Path to a directory containing a folder per layer. Follows the original Google format.""",
)
parser.add_argument("""--max_shard_size""", default="""10GB""", required=False, help="""Max shard size""")
parser.add_argument("""--dtype""", default="""bfloat16""", type=str, required=False, help="""dtype of the saved model""")
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted""",
type=str,
required=False,
help="""Path to the output pytorch model.""",
)
_a : Optional[int] = parser.parse_args()
shard_on_the_fly(
args.switch_tax_checkpoint_path,
args.pytorch_dump_folder_path,
args.max_shard_size,
args.dtype,
)
def a__ ( ):
"""simple docstring"""
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, TaTokenizer
_snake_case : List[str] = SwitchTransformersConfig.from_pretrained("google/switch-base-8" )
config.save_pretrained("/home/arthur_huggingface_co/transformers/switch_converted" )
_snake_case : str = SwitchTransformersForConditionalGeneration.from_pretrained(
"/home/arthur_huggingface_co/transformers/switch_converted" , device_map="auto" )
_snake_case : List[Any] = TaTokenizer.from_pretrained("t5-small" )
_snake_case : Optional[Any] = "A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>."
_snake_case : Dict = tokenizer(a , return_tensors="pt" ).input_ids
_snake_case : List[Any] = model.generate(a , decoder_start_token_id=0 )
print(tokenizer.decode(out[0] ) )
| 87 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_a : List[Any] = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : List[Any] = ["""BartphoTokenizer"""]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bartpho import BartphoTokenizer
else:
import sys
_a : Optional[int] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 87 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import MobileBertConfig, is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_MODEL_FOR_PRETRAINING_MAPPING,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertModel,
)
@require_tf
class _UpperCAmelCase(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common test suite for the TF MobileBert model family.

    Fixes applied in review:
    - The class previously inherited from an undefined name ``_snake_case``;
      the bases are restored to the two mixins imported (and otherwise unused)
      at the top of the file.
    - All class attributes were collapsed to the single name ``__lowercase``,
      so each definition shadowed the previous one; the conventional mixin
      attribute names are restored.
    - The hook below was named ``lowerCamelCase__`` while delegating to
      ``super()._prepare_for_class`` — it could never override the mixin hook.

    NOTE(review): this module defines a second class with the same obfuscated
    name ``_UpperCAmelCase`` later in the file, which shadows this one at
    module level; the classes need distinct names for test discovery to work.
    """

    # Model classes exercised by the generic TFModelTesterMixin tests.
    all_model_classes = (
        (
            TFMobileBertModel,
            TFMobileBertForMaskedLM,
            TFMobileBertForNextSentencePrediction,
            TFMobileBertForPreTraining,
            TFMobileBertForQuestionAnswering,
            TFMobileBertForSequenceClassification,
            TFMobileBertForTokenClassification,
            TFMobileBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    # Task -> model mapping used by PipelineTesterMixin.
    pipeline_model_mapping = (
        {
            "feature-extraction": TFMobileBertModel,
            "fill-mask": TFMobileBertForMaskedLM,
            "question-answering": TFMobileBertForQuestionAnswering,
            "text-classification": TFMobileBertForSequenceClassification,
            "token-classification": TFMobileBertForTokenClassification,
            "zero-shot": TFMobileBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        """Extend the mixin hook: pretraining heads additionally need a
        ``next_sentence_label`` when labels are requested.

        The original obfuscated signature reused ``snake_case_`` for every
        parameter (a SyntaxError); distinct names are restored here.
        """
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(TF_MODEL_FOR_PRETRAINING_MAPPING):
                # `tf.intaa` did not exist; int32 is the dtype expected for label ids.
                inputs_dict["next_sentence_label"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)

        return inputs_dict
class _UpperCAmelCase ( _snake_case):
def __init__( self , snake_case_ , snake_case_=13 , snake_case_=7 , snake_case_=True , snake_case_=True , snake_case_=True , snake_case_=True , snake_case_=99 , snake_case_=32 , snake_case_=32 , snake_case_=2 , snake_case_=4 , snake_case_=37 , snake_case_="gelu" , snake_case_=0.1 , snake_case_=0.1 , snake_case_=5_12 , snake_case_=16 , snake_case_=2 , snake_case_=0.02 , snake_case_=3 , snake_case_=4 , snake_case_=None , ):
_snake_case : Optional[Any] = parent
_snake_case : List[Any] = batch_size
_snake_case : Optional[int] = seq_length
_snake_case : Dict = is_training
_snake_case : Union[str, Any] = use_input_mask
_snake_case : List[Any] = use_token_type_ids
_snake_case : int = use_labels
_snake_case : Dict = vocab_size
_snake_case : Tuple = hidden_size
_snake_case : Optional[int] = num_hidden_layers
_snake_case : List[str] = num_attention_heads
_snake_case : Optional[Any] = intermediate_size
_snake_case : Dict = hidden_act
_snake_case : Tuple = hidden_dropout_prob
_snake_case : Union[str, Any] = attention_probs_dropout_prob
_snake_case : str = max_position_embeddings
_snake_case : str = type_vocab_size
_snake_case : Any = type_sequence_label_size
_snake_case : Optional[int] = initializer_range
_snake_case : List[Any] = num_labels
_snake_case : Optional[int] = num_choices
_snake_case : Optional[int] = scope
_snake_case : Any = embedding_size
def lowerCamelCase__ ( self ):
_snake_case : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_snake_case : Optional[Any] = None
if self.use_input_mask:
_snake_case : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] )
_snake_case : List[str] = None
if self.use_token_type_ids:
_snake_case : Any = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_snake_case : Dict = None
_snake_case : Tuple = None
_snake_case : str = None
if self.use_labels:
_snake_case : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_snake_case : Any = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_snake_case : Optional[int] = ids_tensor([self.batch_size] , self.num_choices )
_snake_case : Tuple = MobileBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , embedding_size=self.embedding_size , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCamelCase__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ):
_snake_case : Dict = TFMobileBertModel(config=snake_case_ )
_snake_case : Optional[Any] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
_snake_case : Optional[int] = model(snake_case_ )
_snake_case : Union[str, Any] = [input_ids, input_mask]
_snake_case : Optional[Any] = model(snake_case_ )
_snake_case : Dict = model(snake_case_ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def lowerCamelCase__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ):
_snake_case : List[Any] = TFMobileBertForMaskedLM(config=snake_case_ )
_snake_case : Tuple = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
_snake_case : List[str] = model(snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCamelCase__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ):
_snake_case : Union[str, Any] = TFMobileBertForNextSentencePrediction(config=snake_case_ )
_snake_case : Dict = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
_snake_case : Tuple = model(snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def lowerCamelCase__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ):
_snake_case : str = TFMobileBertForPreTraining(config=snake_case_ )
_snake_case : Union[str, Any] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
_snake_case : List[Any] = model(snake_case_ )
self.parent.assertEqual(
result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def lowerCamelCase__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ):
_snake_case : str = self.num_labels
_snake_case : str = TFMobileBertForSequenceClassification(config=snake_case_ )
_snake_case : Union[str, Any] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
_snake_case : Optional[int] = model(snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCamelCase__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ):
_snake_case : Any = self.num_choices
_snake_case : Tuple = TFMobileBertForMultipleChoice(config=snake_case_ )
_snake_case : List[Any] = tf.tile(tf.expand_dims(snake_case_ , 1 ) , (1, self.num_choices, 1) )
_snake_case : List[str] = tf.tile(tf.expand_dims(snake_case_ , 1 ) , (1, self.num_choices, 1) )
_snake_case : Tuple = tf.tile(tf.expand_dims(snake_case_ , 1 ) , (1, self.num_choices, 1) )
_snake_case : int = {
"input_ids": multiple_choice_inputs_ids,
"attention_mask": multiple_choice_input_mask,
"token_type_ids": multiple_choice_token_type_ids,
}
_snake_case : Optional[Any] = model(snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowerCamelCase__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ):
_snake_case : Union[str, Any] = self.num_labels
_snake_case : Optional[int] = TFMobileBertForTokenClassification(config=snake_case_ )
_snake_case : List[Any] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
_snake_case : List[Any] = model(snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCamelCase__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ):
_snake_case : int = TFMobileBertForQuestionAnswering(config=snake_case_ )
_snake_case : str = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
_snake_case : Union[str, Any] = model(snake_case_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCamelCase__ ( self ):
_snake_case : Optional[Any] = self.prepare_config_and_inputs()
(
(
_snake_case
) , (
_snake_case
) , (
_snake_case
) , (
_snake_case
) , (
_snake_case
) , (
_snake_case
) , (
_snake_case
) ,
) : Tuple = config_and_inputs
_snake_case : Tuple = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
def lowerCamelCase__ ( self ):
_snake_case : int = TFMobileBertModelTest.TFMobileBertModelTester(self )
_snake_case : Optional[Any] = ConfigTester(self , config_class=snake_case_ , hidden_size=37 )
def lowerCamelCase__ ( self ):
self.config_tester.run_common_tests()
def lowerCamelCase__ ( self ):
_snake_case : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_model(*snake_case_ )
def lowerCamelCase__ ( self ):
_snake_case : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_masked_lm(*snake_case_ )
def lowerCamelCase__ ( self ):
_snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_multiple_choice(*snake_case_ )
def lowerCamelCase__ ( self ):
_snake_case : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*snake_case_ )
def lowerCamelCase__ ( self ):
_snake_case : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_pretraining(*snake_case_ )
def lowerCamelCase__ ( self ):
_snake_case : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_question_answering(*snake_case_ )
def lowerCamelCase__ ( self ):
_snake_case : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_sequence_classification(*snake_case_ )
def lowerCamelCase__ ( self ):
_snake_case : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_token_classification(*snake_case_ )
@slow
def lowerCamelCase__ ( self ):
# for model_name in TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["google/mobilebert-uncased"]:
_snake_case : str = TFMobileBertModel.from_pretrained(snake_case_ )
self.assertIsNotNone(snake_case_ )
@require_tf
class _UpperCAmelCase(unittest.TestCase):
    """Integration test: run the pretrained checkpoint on a fixed input and
    compare a slice of the logits against reference values."""

    @slow
    def test_inference_masked_lm(self):
        model = TFMobileBertForPreTraining.from_pretrained("google/mobilebert-uncased")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        expected_shape = [1, 6, 30522]
        self.assertEqual(output.shape, expected_shape)
        expected_slice = tf.constant(
            [
                [
                    [-4.5919547, -9.248295, -9.645256],
                    [-6.7306175, -6.440284, -6.6052837],
                    [-7.2743506, -6.7847915, -6.024673],
                ]
            ]
        )
        # Reference values have ~7 significant digits; 1e-4 absolute tolerance.
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
| 87 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
# Module-level logger for this configuration module.
_a : Optional[int] = logging.get_logger(__name__)
# Checkpoint name -> hosted config.json URL (pretrained config archive map).
_a : Dict = {
    """microsoft/table-transformer-detection""": (
        """https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json"""
    ),
}
class _UpperCAmelCase(PretrainedConfig):
    """Configuration for Table Transformer (DETR-style detection) models.

    NOTE(review): the original assigned every constructor argument to a
    throwaway local instead of `self`, and gave all three class attributes the
    same name; restored per the standard transformers config layout.
    """

    model_type = "table-transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(self, use_timm_backbone=True, backbone_config=None, num_channels=3, num_queries=100, encoder_layers=6, encoder_ffn_dim=2048, encoder_attention_heads=8, decoder_layers=6, decoder_ffn_dim=2048, decoder_attention_heads=8, encoder_layerdrop=0.0, decoder_layerdrop=0.0, is_encoder_decoder=True, activation_function="relu", d_model=256, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, init_xavier_std=1.0, auxiliary_loss=False, position_embedding_type="sine", backbone="resnet50", use_pretrained_backbone=True, dilation=False, class_cost=1, bbox_cost=5, giou_cost=2, mask_loss_coefficient=1, dice_loss_coefficient=1, bbox_loss_coefficient=5, giou_loss_coefficient=2, eos_coefficient=0.1, **kwargs):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")
        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
            # set timm attributes to None
            dilation, backbone, use_pretrained_backbone = None, None, None
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self):
        return self.encoder_attention_heads

    @property
    def hidden_size(self):
        return self.d_model
class _UpperCAmelCase(OnnxConfig):
    """ONNX export configuration for Table Transformer.

    NOTE(review): the original named all three properties identically, so only
    the last survived; restored to the standard OnnxConfig property names.
    """

    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self):
        # Dynamic-axis spec for the exported graph inputs.
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self):
        # Absolute tolerance when validating exported outputs against PyTorch.
        return 1e-5

    @property
    def default_onnx_opset(self):
        return 12
| 87 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
# Lazy-import structure: module path -> exported names.
# NOTE(review): the original assigned to a throwaway `_a`, so
# `_import_structure` (read by _LazyModule below) was never defined.
_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_bartpho"] = ["BartphoTokenizer"]

if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_bartpho import BartphoTokenizer
else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 87 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger for this configuration module.
_a : str = logging.get_logger(__name__)
# Checkpoint name -> hosted config.json URL (pretrained config archive map).
_a : Optional[int] = {
    """MIT/ast-finetuned-audioset-10-10-0.4593""": (
        """https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json"""
    ),
}
class _UpperCAmelCase(PretrainedConfig):
    """Configuration for the Audio Spectrogram Transformer (AST).

    NOTE(review): the original constructor assigned every argument to a
    throwaway local; restored the `self.<name> = <name>` bindings so the
    config actually carries its values.
    """

    model_type = "audio-spectrogram-transformer"

    def __init__(self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-12, patch_size=16, qkv_bias=True, frequency_stride=10, time_stride=10, max_length=1024, num_mel_bins=128, **kwargs):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.qkv_bias = qkv_bias
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
| 87 |
"""simple docstring"""
def search(list_data: list, key: int, left: int = 0, right: int = 0) -> int:
    """Two-ended linear search: return the index of `key` in `list_data`, or -1.

    `right == 0` is a sentinel meaning "search to the end of the list".

    NOTE(review): the original duplicated the parameter name `a` (a
    SyntaxError) and re-applied `right or len(...) - 1` on every recursive
    call, which silently reset the right bound to the end whenever it
    decremented to 0. The sentinel is now normalised exactly once.
    """
    right = right or len(list_data) - 1
    while left <= right:
        if list_data[left] == key:
            return left
        if list_data[right] == key:
            return right
        left += 1
        right -= 1
    return -1
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    from doctest import testmod

    testmod()
| 87 | 1 |
"""simple docstring"""
from collections.abc import Generator
from math import sin
def to_little_endian(string_32: bytes) -> bytes:
    """Reverse the byte order of a 32-char bit-string in 8-char (one-byte) groups.

    Renamed from the mangled `a__` to match the call sites in `preprocess`
    and `get_block_words`; the parameter name is restored from the body.
    """
    if len(string_32) != 32:
        raise ValueError("Input must be of length 32")
    little_endian = b""
    for i in [3, 2, 1, 0]:
        little_endian += string_32[8 * i : 8 * i + 8]
    return little_endian
def reformat_hex(i: int) -> bytes:
    """Render the low 32 bits of `i` as 8 hex chars in little-endian byte order.

    NOTE(review): the original validated an undefined name `i` while the
    parameter was called `a`; the parameter is now `i`, matching the body.
    """
    if i < 0:
        raise ValueError("Input must be non-negative")
    hex_rep = format(i, "08x")[-8:]
    little_endian_hex = b""
    for j in [3, 2, 1, 0]:
        little_endian_hex += hex_rep[2 * j : 2 * j + 2].encode("utf-8")
    return little_endian_hex
def preprocess(message: bytes) -> bytes:
    """MD5 padding: expand `message` to an ASCII bit-string padded to a multiple of 512.

    NOTE(review): the original tested `len(message) % 512` in the padding
    loop instead of the growing bit string, so the padding never converged;
    restored to test `len(bit_string)`.
    """
    bit_string = b""
    for char in message:
        bit_string += format(char, "08b").encode("utf-8")
    # 64-bit length of the *unpadded* bit string, appended little-endian below.
    start_len = format(len(bit_string), "064b").encode("utf-8")
    # Pad bit_string to a multiple of 512 chars
    bit_string += b"1"
    while len(bit_string) % 512 != 448:
        bit_string += b"0"
    bit_string += to_little_endian(start_len[32:]) + to_little_endian(start_len[:32])
    return bit_string
def get_block_words(bit_string: bytes):
    """Yield each 512-bit block of `bit_string` as sixteen little-endian 32-bit ints.

    NOTE(review): the original sliced an undefined name `bit_string` while
    the parameter was called `a`; the parameter name is restored.
    """
    if len(bit_string) % 512 != 0:
        raise ValueError("Input must have length that's a multiple of 512")
    for pos in range(0, len(bit_string), 512):
        block = bit_string[pos : pos + 512]
        block_words = []
        for i in range(0, 512, 32):
            block_words.append(int(to_little_endian(block[i : i + 32]), 2))
        yield block_words
def not_aa(i: int) -> int:
    """Bitwise NOT on the low 32 bits of a non-negative int.

    Renamed from the mangled `a__` to match the call site in the MD5 main loop.
    """
    if i < 0:
        raise ValueError("Input must be non-negative")
    i_str = format(i, "032b")
    new_str = ""
    for c in i_str:
        new_str += "1" if c == "0" else "0"
    return int(new_str, 2)
def sum_aa(a: int, b: int) -> int:
    """Add two ints modulo 2**32 (the original duplicated the parameter name `a`)."""
    return (a + b) % 2**32
def left_rotate_aa(i: int, shift: int) -> int:
    """Rotate the low 32 bits of `i` left by `shift` bits.

    (The original duplicated the parameter name `a`; names restored from the body.)
    """
    if i < 0:
        raise ValueError("Input must be non-negative")
    if shift < 0:
        raise ValueError("Shift must be non-negative")
    return ((i << shift) ^ (i >> (32 - shift))) % 2**32
def md5_me(message: bytes) -> bytes:
    """Return the MD5 digest of `message` as 32 lowercase hex chars (bytes).

    Pure-Python RFC 1321 implementation. NOTE(review): the original passed
    the raw message into `not_aa`/`sum_aa`/`left_rotate_aa` and discarded the
    running state into throwaway locals; the state flow is restored.
    """
    bit_string = preprocess(message)
    # Per-round additive constants: floor(2**32 * |sin(i+1)|).
    added_consts = [int(2**32 * abs(sin(i + 1))) for i in range(64)]

    # Starting states
    a0 = 0x67452301
    b0 = 0xEFCDAB89
    c0 = 0x98BADCFE
    d0 = 0x10325476

    # Per-round left-rotation amounts (four groups of sixteen).
    shift_amounts = [
        7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22,
        5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20,
        4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23,
        6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21,
    ]

    # Process bit string in chunks, each with 16 32-char words
    for block_words in get_block_words(bit_string):
        a = a0
        b = b0
        c = c0
        d = d0

        # Hash current chunk
        for i in range(64):
            if i <= 15:
                # f = (b & c) | (not_32(b) & d) # Alternate definition for f
                f = d ^ (b & (c ^ d))
                g = i
            elif i <= 31:
                # f = (d & b) | (not_32(d) & c) # Alternate definition for f
                f = c ^ (d & (b ^ c))
                g = (5 * i + 1) % 16
            elif i <= 47:
                f = b ^ c ^ d
                g = (3 * i + 5) % 16
            else:
                f = c ^ (b | not_aa(d))
                g = (7 * i) % 16
            f = (f + a + added_consts[i] + block_words[g]) % 2**32
            a = d
            d = c
            c = b
            b = sum_aa(b, left_rotate_aa(f, shift_amounts[i]))

        # Add hashed chunk to running total
        a0 = sum_aa(a0, a)
        b0 = sum_aa(b0, b)
        c0 = sum_aa(c0, c)
        d0 = sum_aa(d0, d)

    digest = reformat_hex(a0) + reformat_hex(b0) + reformat_hex(c0) + reformat_hex(d0)
    return digest
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    from doctest import testmod

    testmod()
| 87 |
"""simple docstring"""
from __future__ import annotations
class _UpperCAmelCase :
def __init__( self , snake_case_ , snake_case_ ):
_snake_case , _snake_case : Dict = text, pattern
_snake_case , _snake_case : int = len(snake_case_ ), len(snake_case_ )
def lowerCamelCase__ ( self , snake_case_ ):
for i in range(self.patLen - 1 , -1 , -1 ):
if char == self.pattern[i]:
return i
return -1
def lowerCamelCase__ ( self , snake_case_ ):
for i in range(self.patLen - 1 , -1 , -1 ):
if self.pattern[i] != self.text[current_pos + i]:
return current_pos + i
return -1
def lowerCamelCase__ ( self ):
# searches pattern in text and returns index positions
_snake_case : List[str] = []
for i in range(self.textLen - self.patLen + 1 ):
_snake_case : Union[str, Any] = self.mismatch_in_text(snake_case_ )
if mismatch_index == -1:
positions.append(snake_case_ )
else:
_snake_case : Tuple = self.match_in_pattern(self.text[mismatch_index] )
_snake_case : Tuple = (
mismatch_index - match_index
) # shifting index lgtm [py/multiple-definition]
return positions
# Demo: report all alignments where the pattern matches the text.
# NOTE(review): the original assigned both strings to a throwaway `_a` and
# then read undefined names (`text`, `pattern`, `BoyerMooreSearch`); names
# restored, using the class actually defined above.
text = """ABAABA"""
pattern = """AB"""
bms = _UpperCAmelCase(text, pattern)
positions = bms.bad_character_heuristic()
if len(positions) == 0:
    print("""No match found""")
else:
    print("""Pattern found in following positions: """)
    print(positions)
| 87 | 1 |
"""simple docstring"""
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig
if TYPE_CHECKING:
from ... import PreTrainedTokenizerBase, TensorType
_a : Tuple = logging.get_logger(__name__)
class _UpperCAmelCase(PretrainedConfig):
    """Composite config pairing an image-encoder config with a text-decoder config.

    NOTE(review): the original dropped the resolved sub-configs into
    throwaway locals; the `self.encoder` / `self.decoder` bindings and the
    standard method names are restored.
    """

    model_type = "vision-encoder-decoder"
    is_composition = True

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        if "encoder" not in kwargs or "decoder" not in kwargs:
            raise ValueError(
                f'A configuraton of type {self.model_type} cannot be instantiated because '
                f'not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}')
        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")
        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(cls, encoder_config, decoder_config, **kwargs):
        """Build a composite config from two sub-configs, enabling cross-attention on the decoder."""
        logger.info("Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True
        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self):
        """Serialize, expanding the nested sub-configs to plain dicts."""
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
return output
class _UpperCAmelCase(OnnxConfig):
    """ONNX export spec for the vision encoder half.

    NOTE(review): original property names were all mangled to the same
    identifier; restored to the standard OnnxConfig property names.
    """

    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self):
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self):
        return 1e-4

    @property
    def outputs(self):
        return OrderedDict({"last_hidden_state": {0: "batch", 1: "encoder_sequence"}})
class _UpperCAmelCase(OnnxConfig):
    """ONNX export spec for the text decoder half.

    NOTE(review): the original built the input dicts into throwaway locals;
    the keyed `common_inputs[...]` assignments are restored.
    """

    @property
    def inputs(self):
        common_inputs = OrderedDict()
        common_inputs["input_ids"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        common_inputs["attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        common_inputs["encoder_hidden_states"] = {0: "batch", 1: "encoder_sequence"}
        return common_inputs

    def generate_dummy_inputs(self, tokenizer, batch_size=-1, seq_length=-1, is_pair=False, framework=None):
        """Extend the base dummy inputs with zeroed encoder hidden states."""
        import torch

        common_inputs = OrderedDict()
        dummy_input = super().generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework)
        batch, encoder_sequence = dummy_input["input_ids"].shape
        # (batch, seq, hidden) placeholder matching the encoder's hidden size.
        encoder_hidden_states_shape = (batch, encoder_sequence, self._config.encoder_hidden_size)
        common_inputs["input_ids"] = dummy_input.pop("input_ids")
        common_inputs["attention_mask"] = dummy_input.pop("attention_mask")
        common_inputs["encoder_hidden_states"] = torch.zeros(encoder_hidden_states_shape)
        return common_inputs
class _UpperCAmelCase(OnnxConfig):
    """Dispatcher that hands out per-half ONNX configs for the composite model."""

    @property
    def inputs(self):
        # Intentionally abstract at this level; the per-half configs define
        # the real specs. (Property name presumed from the OnnxConfig API —
        # TODO confirm; the original name was mangled.)
        pass

    def get_encoder_config(self, encoder_config):
        """ONNX config for the vision encoder."""
        return VisionEncoderDecoderEncoderOnnxConfig(encoder_config)

    def get_decoder_config(self, encoder_config, decoder_config, feature="default"):
        """ONNX config for the text decoder, wired to the encoder's hidden size."""
        decoder_config.encoder_hidden_size = encoder_config.hidden_size
        return VisionEncoderDecoderDecoderOnnxConfig(decoder_config, feature)
| 87 |
"""simple docstring"""
from datetime import datetime

import requests
from bs4 import BeautifulSoup  # NOTE(review): original imported from "bsa", a typo for bs4

if __name__ == "__main__":
    # Fetch a page, find its Open Graph preview image, and save it to disk.
    # NOTE(review): the original assigned every value to a throwaway `_a`
    # and then read undefined names (url, soup, image_url, ...); restored.
    url = input("""Enter image url: """).strip()
    print(f'Downloading image from {url} ...')
    soup = BeautifulSoup(requests.get(url).content, """html.parser""")
    # The image URL is in the content field of the first meta tag with property og:image
    image_url = soup.find("""meta""", {"""property""": """og:image"""})["""content"""]
    image_data = requests.get(image_url).content
    # NOTE(review): ':' in the timestamp makes the filename invalid on Windows — confirm target OS.
    file_name = f'{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg'
    with open(file_name, """wb""") as fp:
        fp.write(image_data)
    print(f'Done. Image saved to disk as {file_name}.')
| 87 | 1 |
"""simple docstring"""
# Full unit name -> SI symbol. NOTE(review): both tables were assigned to the
# same throwaway `_a`; names restored from the conversion function's lookups.
UNIT_SYMBOL = {
    "meter": "m",
    "kilometer": "km",
    "megametre": "Mm",
    "gigametre": "Gm",
    "terametre": "Tm",
    "petametre": "Pm",
    "exametre": "Em",
    "zettametre": "Zm",
    "yottametre": "Ym",
}
# Exponent of the factor(meter)
METRIC_CONVERSION = {
    "m": 0,
    "km": 3,
    "Mm": 6,
    "Gm": 9,
    "Tm": 12,
    "Pm": 15,
    "Em": 18,
    "Zm": 21,
    "Ym": 24,
}
def length_conversion(value: float, from_type: str, to_type: str) -> float:
    """Convert `value` between metric length units (full names or SI symbols).

    NOTE(review): the original duplicated the parameter name `a` (a
    SyntaxError) and looked the wrong values up in the tables; parameter and
    lookup names restored from the body's usage.
    """
    # Accept plural full names ("meters") and normalise case.
    from_sanitized = from_type.lower().strip("s")
    to_sanitized = to_type.lower().strip("s")
    # Map full names to symbols; symbols pass through unchanged.
    from_sanitized = UNIT_SYMBOL.get(from_sanitized, from_sanitized)
    to_sanitized = UNIT_SYMBOL.get(to_sanitized, to_sanitized)
    if from_sanitized not in METRIC_CONVERSION:
        msg = (
            f'Invalid \'from_type\' value: {from_type!r}.\n'
            f'Conversion abbreviations are: {", ".join(METRIC_CONVERSION)}'
        )
        raise ValueError(msg)
    if to_sanitized not in METRIC_CONVERSION:
        msg = (
            f'Invalid \'to_type\' value: {to_type!r}.\n'
            f'Conversion abbreviations are: {", ".join(METRIC_CONVERSION)}'
        )
        raise ValueError(msg)
    from_exponent = METRIC_CONVERSION[from_sanitized]
    to_exponent = METRIC_CONVERSION[to_sanitized]
    if from_exponent > to_exponent:
        exponent = from_exponent - to_exponent
    else:
        exponent = -(to_exponent - from_exponent)
    return value * pow(10, exponent)
if __name__ == "__main__":
    # Run the module doctests when executed directly.
    import doctest

    doctest.testmod()
| 87 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
# Lazy-import structure: module path -> exported names.
# NOTE(review): `_import_structure` (read by _LazyModule below) was assigned
# to a throwaway `_a`, and the TYPE_CHECKING imports used mangled
# "pixastruct"/"PixaStruct" names inconsistent with this structure; both
# sides are now harmonised on pix2struct / Pix2Struct.
_import_structure = {
    "configuration_pix2struct": [
        "PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Pix2StructConfig",
        "Pix2StructTextConfig",
        "Pix2StructVisionConfig",
    ],
    "processing_pix2struct": ["Pix2StructProcessor"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_pix2struct"] = ["Pix2StructImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_pix2struct"] = [
        "PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Pix2StructPreTrainedModel",
        "Pix2StructForConditionalGeneration",
        "Pix2StructVisionModel",
        "Pix2StructTextModel",
    ]

if TYPE_CHECKING:
    from .configuration_pix2struct import (
        PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Pix2StructConfig,
        Pix2StructTextConfig,
        Pix2StructVisionConfig,
    )
    from .processing_pix2struct import Pix2StructProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_pix2struct import Pix2StructImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_pix2struct import (
            PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Pix2StructForConditionalGeneration,
            Pix2StructPreTrainedModel,
            Pix2StructTextModel,
            Pix2StructVisionModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 87 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
# Module-level logger for this configuration module.
_a : Optional[int] = logging.get_logger(__name__)
# Checkpoint name -> hosted config.json URL (pretrained config archive map).
_a : List[str] = {
    """facebook/convnextv2-tiny-1k-224""": """https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json""",
}
class _UpperCAmelCase(BackboneConfigMixin, PretrainedConfig):
    """Configuration for ConvNeXt V2 backbones.

    NOTE(review): the original constructor assigned every argument to a
    throwaway local; the `self.<name>` bindings (including the backbone
    out-features bookkeeping) are restored.
    """

    model_type = "convnextv2"

    def __init__(self, num_channels=3, patch_size=4, num_stages=4, hidden_sizes=None, depths=None, hidden_act="gelu", initializer_range=0.02, layer_norm_eps=1e-12, drop_path_rate=0.0, image_size=224, out_features=None, out_indices=None, **kwargs):
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_stages = num_stages
        self.hidden_sizes = [96, 192, 384, 768] if hidden_sizes is None else hidden_sizes
        self.depths = [3, 3, 9, 3] if depths is None else depths
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.drop_path_rate = drop_path_rate
        self.image_size = image_size
        self.stage_names = ["stem"] + [f'stage{idx}' for idx in range(1, len(self.depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names)
| 87 |
"""simple docstring"""
import argparse
import json
import subprocess
def get_runner_status(target_runners, token):
    """Query the GitHub Actions runners API and raise if any target runner is offline.

    NOTE(review): the original duplicated the parameter name `a` (a
    SyntaxError), passed `shell=a` instead of `shell=True`, and appended the
    wrong name to the result list; restored from the body's usage and the
    `get_runner_status(...)` call in the CLI guard.
    """
    offline_runners = []
    # SECURITY: token is interpolated into a shell=True command line — it can
    # leak via process listings; prefer requests/urllib with an Authorization
    # header if this ever handles untrusted input.
    cmd = (
        f'curl -H "Accept: application/vnd.github+json" -H "Authorization: Bearer {token}"'
        " https://api.github.com/repos/huggingface/transformers/actions/runners"
    )
    output = subprocess.run(cmd, shell=True, stdout=subprocess.PIPE)
    o = output.stdout.decode("utf-8")
    status = json.loads(o)
    runners = status["runners"]
    for runner in runners:
        if runner["name"] in target_runners:
            if runner["status"] == "offline":
                offline_runners.append(runner)
    # save the result so we can report them on Slack
    with open("offline_runners.txt", "w") as fp:
        fp.write(json.dumps(offline_runners))
    if len(offline_runners) > 0:
        failed = "\n".join([x["name"] for x in offline_runners])
        raise ValueError(f'The following runners are offline:\n{failed}')
if __name__ == "__main__":

    def list_str(values):
        """Parse a comma-separated CLI value into a list of runner names.

        NOTE(review): originally named `a__` with a parameter named `a` while
        the body read `values`; restored to match the `type=list_str` usage.
        """
        return values.split(",")

    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--target_runners""",
        default=None,
        type=list_str,
        required=True,
        help="""Comma-separated list of runners to check status.""",
    )
    parser.add_argument(
        """--token""", default=None, type=str, required=True, help="""A token that has actions:read permission."""
    )
    args = parser.parse_args()
    get_runner_status(args.target_runners, args.token)
| 87 | 1 |
"""simple docstring"""
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
# @@protoc_insertion_point(imports)
_a : Tuple = _symbol_database.Default()
_a : Tuple = _descriptor_pool.Default().AddSerializedFile(
B"""\n\x19sentencepiece_model.proto\x12\rsentencepiece\"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12\"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12\"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ 
\x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18\" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. \x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse\"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32\".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 
\x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL\"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03"""
)
_a : str = globals()
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, """sentencepiece_model_pb2""", _globals)
if _descriptor._USE_C_DESCRIPTORS is False:
_a : Optional[Any] = None
_a : Union[str, Any] = B"""H\003"""
# (generated by protobuf compiler, but `_TRAINERSPEC` is not defined)
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001"
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001"
_a : List[Any] = 45
_a : Union[str, Any] = 1_581
_a : List[str] = 1_517
_a : Tuple = 1_570
_a : int = 1_584
_a : str = 1_793
_a : int = 1_795
_a : List[Any] = 1_916
_a : int = 1_864
_a : List[str] = 1_905
_a : Dict = 1_919
_a : Tuple = 2_429
_a : List[Any] = 2_208
_a : List[Any] = 2_418
_a : List[str] = 2_323
_a : str = 2_407
# @@protoc_insertion_point(module_scope)
| 87 |
"""simple docstring"""
import unittest
from .lib import (
Matrix,
Vector,
axpy,
square_zero_matrix,
unit_basis_vector,
zero_vector,
)
class _UpperCAmelCase ( unittest.TestCase):
def lowerCamelCase__ ( self ):
_snake_case : List[Any] = Vector([1, 2, 3] )
self.assertEqual(x.component(0 ) , 1 )
self.assertEqual(x.component(2 ) , 3 )
_snake_case : List[Any] = Vector()
def lowerCamelCase__ ( self ):
_snake_case : Any = Vector([0, 0, 0, 0, 0, 1] )
self.assertEqual(str(snake_case_ ) , "(0,0,0,0,0,1)" )
def lowerCamelCase__ ( self ):
_snake_case : Dict = Vector([1, 2, 3, 4] )
self.assertEqual(len(snake_case_ ) , 4 )
def lowerCamelCase__ ( self ):
_snake_case : List[Any] = Vector([1, 2] )
_snake_case : List[str] = Vector([1, 2, 3, 4, 5] )
_snake_case : List[Any] = Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0] )
_snake_case : Any = Vector([1, -1, 1, -1, 2, -3, 4, -5] )
self.assertAlmostEqual(x.euclidean_length() , 2.236 , 3 )
self.assertAlmostEqual(y.euclidean_length() , 7.416 , 3 )
self.assertEqual(z.euclidean_length() , 0 )
self.assertAlmostEqual(w.euclidean_length() , 7.616 , 3 )
def lowerCamelCase__ ( self ):
_snake_case : List[Any] = Vector([1, 2, 3] )
_snake_case : Any = Vector([1, 1, 1] )
self.assertEqual((x + y).component(0 ) , 2 )
self.assertEqual((x + y).component(1 ) , 3 )
self.assertEqual((x + y).component(2 ) , 4 )
def lowerCamelCase__ ( self ):
_snake_case : str = Vector([1, 2, 3] )
_snake_case : Union[str, Any] = Vector([1, 1, 1] )
self.assertEqual((x - y).component(0 ) , 0 )
self.assertEqual((x - y).component(1 ) , 1 )
self.assertEqual((x - y).component(2 ) , 2 )
def lowerCamelCase__ ( self ):
_snake_case : Optional[int] = Vector([1, 2, 3] )
_snake_case : List[Any] = Vector([2, -1, 4] ) # for test of dot product
_snake_case : Union[str, Any] = Vector([1, -2, -1] )
self.assertEqual(str(x * 3.0 ) , "(3.0,6.0,9.0)" )
self.assertEqual((a * b) , 0 )
def lowerCamelCase__ ( self ):
self.assertEqual(str(zero_vector(10 ) ).count("0" ) , 10 )
def lowerCamelCase__ ( self ):
self.assertEqual(str(unit_basis_vector(3 , 1 ) ) , "(0,1,0)" )
def lowerCamelCase__ ( self ):
_snake_case : Tuple = Vector([1, 2, 3] )
_snake_case : Optional[Any] = Vector([1, 0, 1] )
self.assertEqual(str(axpy(2 , snake_case_ , snake_case_ ) ) , "(3,4,7)" )
def lowerCamelCase__ ( self ):
_snake_case : Union[str, Any] = Vector([1, 0, 0, 0, 0, 0] )
_snake_case : Optional[int] = x.copy()
self.assertEqual(str(snake_case_ ) , str(snake_case_ ) )
def lowerCamelCase__ ( self ):
_snake_case : Dict = Vector([1, 0, 0] )
x.change_component(0 , 0 )
x.change_component(1 , 1 )
self.assertEqual(str(snake_case_ ) , "(0,1,0)" )
def lowerCamelCase__ ( self ):
_snake_case : str = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
self.assertEqual("|1,2,3|\n|2,4,5|\n|6,7,8|\n" , str(snake_case_ ) )
def lowerCamelCase__ ( self ):
_snake_case : Dict = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
_snake_case : str = [[-3, -14, -10], [-5, -10, -5], [-2, -1, 0]]
for x in range(a.height() ):
for y in range(a.width() ):
self.assertEqual(minors[x][y] , a.minor(snake_case_ , snake_case_ ) )
def lowerCamelCase__ ( self ):
_snake_case : Union[str, Any] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
_snake_case : Optional[Any] = [[-3, 14, -10], [5, -10, 5], [-2, 1, 0]]
for x in range(a.height() ):
for y in range(a.width() ):
self.assertEqual(cofactors[x][y] , a.cofactor(snake_case_ , snake_case_ ) )
def lowerCamelCase__ ( self ):
_snake_case : int = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
self.assertEqual(-5 , a.determinant() )
def lowerCamelCase__ ( self ):
_snake_case : str = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]] , 3 , 3 )
_snake_case : List[str] = Vector([1, 2, 3] )
self.assertEqual("(14,32,50)" , str(a * x ) )
self.assertEqual("|2,4,6|\n|8,10,12|\n|14,16,18|\n" , str(a * 2 ) )
def lowerCamelCase__ ( self ):
_snake_case : Optional[int] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
a.change_component(0 , 2 , 5 )
self.assertEqual("|1,2,5|\n|2,4,5|\n|6,7,8|\n" , str(snake_case_ ) )
def lowerCamelCase__ ( self ):
_snake_case : List[Any] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
self.assertEqual(7 , a.component(2 , 1 ) , 0.01 )
def lowerCamelCase__ ( self ):
_snake_case : Tuple = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
_snake_case : int = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 )
self.assertEqual("|2,4,10|\n|4,8,10|\n|12,14,18|\n" , str(a + b ) )
def lowerCamelCase__ ( self ):
_snake_case : int = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
_snake_case : Optional[Any] = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 )
self.assertEqual("|0,0,-4|\n|0,0,0|\n|0,0,-2|\n" , str(a - b ) )
def lowerCamelCase__ ( self ):
self.assertEqual(
"|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n" , str(square_zero_matrix(5 ) ) , )
if __name__ == "__main__":
unittest.main()
| 87 | 1 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class _UpperCAmelCase ( metaclass=_snake_case):
__lowercase : Union[str, Any] = ["""transformers""", """torch""", """note_seq"""]
def __init__( self , *snake_case_ , **snake_case_ ):
requires_backends(self , ["transformers", "torch", "note_seq"] )
@classmethod
def lowerCamelCase__ ( cls , *snake_case_ , **snake_case_ ):
requires_backends(cls , ["transformers", "torch", "note_seq"] )
@classmethod
def lowerCamelCase__ ( cls , *snake_case_ , **snake_case_ ):
requires_backends(cls , ["transformers", "torch", "note_seq"] )
| 87 |
"""simple docstring"""
from __future__ import annotations
from collections import namedtuple
def a__ ( a : float , a : float , a : float ):
"""simple docstring"""
_snake_case : Optional[Any] = namedtuple("result" , "name value" )
if (voltage, current, power).count(0 ) != 1:
raise ValueError("Only one argument must be 0" )
elif power < 0:
raise ValueError(
"Power cannot be negative in any electrical/electronics system" )
elif voltage == 0:
return result("voltage" , power / current )
elif current == 0:
return result("current" , power / voltage )
elif power == 0:
return result("power" , float(round(abs(voltage * current ) , 2 ) ) )
else:
raise ValueError("Exactly one argument must be 0" )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 87 | 1 |
"""simple docstring"""
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
_a : Tuple = """\
@misc{wu2016googles,
title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},
author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey
and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin
Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto
Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and
Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes
and Jeffrey Dean},
year={2016},
eprint={1609.08144},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
"""
_a : str = """\
The BLEU score has some undesirable properties when used for single
sentences, as it was designed to be a corpus measure. We therefore
use a slightly different score for our RL experiments which we call
the 'GLEU score'. For the GLEU score, we record all sub-sequences of
1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then
compute a recall, which is the ratio of the number of matching n-grams
to the number of total n-grams in the target (ground truth) sequence,
and a precision, which is the ratio of the number of matching n-grams
to the number of total n-grams in the generated output sequence. Then
GLEU score is simply the minimum of recall and precision. This GLEU
score's range is always between 0 (no matches) and 1 (all match) and
it is symmetrical when switching output and target. According to
our experiments, GLEU score correlates quite well with the BLEU
metric on a corpus level but does not have its drawbacks for our per
sentence reward objective.
"""
_a : Optional[int] = """\
Computes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.
Instead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching
tokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.
Args:
predictions (list of str): list of translations to score.
Each translation should be tokenized into a list of tokens.
references (list of list of str): list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.
max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.
Returns:
'google_bleu': google_bleu score
Examples:
Example 1:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results[\"google_bleu\"], 2))
0.44
Example 2:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results[\"google_bleu\"], 2))
0.61
Example 3:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)
>>> print(round(results[\"google_bleu\"], 2))
0.53
Example 4:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)
>>> print(round(results[\"google_bleu\"], 2))
0.4
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class _UpperCAmelCase ( datasets.Metric):
def lowerCamelCase__ ( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ),
"references": datasets.Sequence(
datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ) , id="references" ),
} ) , )
def lowerCamelCase__ ( self , snake_case_ , snake_case_ , snake_case_ = 1 , snake_case_ = 4 , ):
return {
"google_bleu": gleu_score.corpus_gleu(
list_of_references=snake_case_ , hypotheses=snake_case_ , min_len=snake_case_ , max_len=snake_case_ )
}
| 87 |
"""simple docstring"""
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DPMSolverMultistepScheduler,
TextToVideoSDPipeline,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class _UpperCAmelCase ( _snake_case , unittest.TestCase):
__lowercase : Any = TextToVideoSDPipeline
__lowercase : str = TEXT_TO_IMAGE_PARAMS
__lowercase : int = TEXT_TO_IMAGE_BATCH_PARAMS
# No `output_type`.
__lowercase : Optional[int] = frozenset(
[
"""num_inference_steps""",
"""generator""",
"""latents""",
"""return_dict""",
"""callback""",
"""callback_steps""",
])
def lowerCamelCase__ ( self ):
torch.manual_seed(0 )
_snake_case : str = UNetaDConditionModel(
block_out_channels=(32, 64, 64, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D") , up_block_types=("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D") , cross_attention_dim=32 , attention_head_dim=4 , )
_snake_case : List[Any] = DDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule="scaled_linear" , clip_sample=snake_case_ , set_alpha_to_one=snake_case_ , )
torch.manual_seed(0 )
_snake_case : Union[str, Any] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , sample_size=1_28 , )
torch.manual_seed(0 )
_snake_case : Optional[Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act="gelu" , projection_dim=5_12 , )
_snake_case : Tuple = CLIPTextModel(snake_case_ )
_snake_case : Optional[int] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
_snake_case : Any = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
}
return components
def lowerCamelCase__ ( self , snake_case_ , snake_case_=0 ):
if str(snake_case_ ).startswith("mps" ):
_snake_case : str = torch.manual_seed(snake_case_ )
else:
_snake_case : Union[str, Any] = torch.Generator(device=snake_case_ ).manual_seed(snake_case_ )
_snake_case : str = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
"output_type": "pt",
}
return inputs
def lowerCamelCase__ ( self ):
_snake_case : int = "cpu" # ensure determinism for the device-dependent torch.Generator
_snake_case : Optional[Any] = self.get_dummy_components()
_snake_case : Tuple = TextToVideoSDPipeline(**snake_case_ )
_snake_case : List[str] = sd_pipe.to(snake_case_ )
sd_pipe.set_progress_bar_config(disable=snake_case_ )
_snake_case : int = self.get_dummy_inputs(snake_case_ )
_snake_case : Union[str, Any] = "np"
_snake_case : Dict = sd_pipe(**snake_case_ ).frames
_snake_case : Any = frames[0][-3:, -3:, -1]
assert frames[0].shape == (64, 64, 3)
_snake_case : Dict = np.array([158.0, 160.0, 153.0, 125.0, 100.0, 121.0, 111.0, 93.0, 113.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def lowerCamelCase__ ( self ):
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=snake_case_ , expected_max_diff=3E-3 )
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def lowerCamelCase__ ( self ):
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=snake_case_ , expected_max_diff=1E-2 )
@unittest.skip(reason="Batching needs to be properly figured out first for this pipeline." )
def lowerCamelCase__ ( self ):
pass
@unittest.skip(reason="Batching needs to be properly figured out first for this pipeline." )
def lowerCamelCase__ ( self ):
pass
@unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline." )
def lowerCamelCase__ ( self ):
pass
def lowerCamelCase__ ( self ):
return super().test_progress_bar()
@slow
@skip_mps
class _UpperCAmelCase ( unittest.TestCase):
def lowerCamelCase__ ( self ):
_snake_case : List[Any] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy" )
_snake_case : int = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b" )
_snake_case : str = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
_snake_case : Tuple = pipe.to("cuda" )
_snake_case : List[Any] = "Spiderman is surfing"
_snake_case : Optional[int] = torch.Generator(device="cpu" ).manual_seed(0 )
_snake_case : int = pipe(snake_case_ , generator=snake_case_ , num_inference_steps=25 , output_type="pt" ).frames
_snake_case : int = video_frames.cpu().numpy()
assert np.abs(expected_video - video ).mean() < 5E-2
def lowerCamelCase__ ( self ):
_snake_case : Any = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy" )
_snake_case : str = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b" )
_snake_case : int = pipe.to("cuda" )
_snake_case : Any = "Spiderman is surfing"
_snake_case : str = torch.Generator(device="cpu" ).manual_seed(0 )
_snake_case : Any = pipe(snake_case_ , generator=snake_case_ , num_inference_steps=2 , output_type="pt" ).frames
_snake_case : Optional[int] = video_frames.cpu().numpy()
assert np.abs(expected_video - video ).mean() < 5E-2
| 87 | 1 |
"""simple docstring"""
import json
import os
from typing import Optional
import numpy as np
from ...feature_extraction_utils import BatchFeature
from ...processing_utils import ProcessorMixin
from ...utils import logging
from ...utils.hub import get_file_from_repo
from ..auto import AutoTokenizer
_a : Dict = logging.get_logger(__name__)
class _UpperCAmelCase ( _snake_case):
__lowercase : Tuple = """AutoTokenizer"""
__lowercase : Dict = ["""tokenizer"""]
__lowercase : Optional[Any] = {
"""semantic_prompt""": 1,
"""coarse_prompt""": 2,
"""fine_prompt""": 2,
}
def __init__( self , snake_case_ , snake_case_=None ):
super().__init__(snake_case_ )
_snake_case : List[str] = speaker_embeddings
@classmethod
def lowerCamelCase__ ( cls , snake_case_ , snake_case_="speaker_embeddings_path.json" , **snake_case_ ):
if speaker_embeddings_dict_path is not None:
_snake_case : List[Any] = get_file_from_repo(
snake_case_ , snake_case_ , subfolder=kwargs.pop("subfolder" , snake_case_ ) , cache_dir=kwargs.pop("cache_dir" , snake_case_ ) , force_download=kwargs.pop("force_download" , snake_case_ ) , proxies=kwargs.pop("proxies" , snake_case_ ) , resume_download=kwargs.pop("resume_download" , snake_case_ ) , local_files_only=kwargs.pop("local_files_only" , snake_case_ ) , use_auth_token=kwargs.pop("use_auth_token" , snake_case_ ) , revision=kwargs.pop("revision" , snake_case_ ) , )
if speaker_embeddings_path is None:
logger.warning(
F'`{os.path.join(snake_case_ , snake_case_ )}` does not exists\n , no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json\n dictionnary if wanted, otherwise set `speaker_embeddings_dict_path=None`.' )
_snake_case : Dict = None
else:
with open(snake_case_ ) as speaker_embeddings_json:
_snake_case : int = json.load(snake_case_ )
else:
_snake_case : int = None
_snake_case : Dict = AutoTokenizer.from_pretrained(snake_case_ , **snake_case_ )
return cls(tokenizer=snake_case_ , speaker_embeddings=snake_case_ )
def lowerCamelCase__ ( self , snake_case_ , snake_case_="speaker_embeddings_path.json" , snake_case_="speaker_embeddings" , snake_case_ = False , **snake_case_ , ):
if self.speaker_embeddings is not None:
os.makedirs(os.path.join(snake_case_ , snake_case_ , "v2" ) , exist_ok=snake_case_ )
_snake_case : int = {}
_snake_case : int = save_directory
for prompt_key in self.speaker_embeddings:
if prompt_key != "repo_or_path":
_snake_case : Union[str, Any] = self._load_voice_preset(snake_case_ )
_snake_case : List[str] = {}
for key in self.speaker_embeddings[prompt_key]:
np.save(
os.path.join(
embeddings_dict["repo_or_path"] , snake_case_ , F'{prompt_key}_{key}' ) , voice_preset[key] , allow_pickle=snake_case_ , )
_snake_case : Optional[int] = os.path.join(snake_case_ , F'{prompt_key}_{key}.npy' )
_snake_case : Union[str, Any] = tmp_dict
with open(os.path.join(snake_case_ , snake_case_ ) , "w" ) as fp:
json.dump(snake_case_ , snake_case_ )
super().save_pretrained(snake_case_ , snake_case_ , **snake_case_ )
def lowerCamelCase__ ( self , snake_case_ = None , **snake_case_ ):
_snake_case : Dict = self.speaker_embeddings[voice_preset]
_snake_case : Optional[Any] = {}
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset_paths:
raise ValueError(
F'Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}].' )
_snake_case : str = get_file_from_repo(
self.speaker_embeddings.get("repo_or_path" , "/" ) , voice_preset_paths[key] , subfolder=kwargs.pop("subfolder" , snake_case_ ) , cache_dir=kwargs.pop("cache_dir" , snake_case_ ) , force_download=kwargs.pop("force_download" , snake_case_ ) , proxies=kwargs.pop("proxies" , snake_case_ ) , resume_download=kwargs.pop("resume_download" , snake_case_ ) , local_files_only=kwargs.pop("local_files_only" , snake_case_ ) , use_auth_token=kwargs.pop("use_auth_token" , snake_case_ ) , revision=kwargs.pop("revision" , snake_case_ ) , )
if path is None:
raise ValueError(
F'`{os.path.join(self.speaker_embeddings.get("repo_or_path" , "/" ) , voice_preset_paths[key] )}` does not exists\n , no preloaded voice preset will be used - Make sure to provide correct paths to the {voice_preset}\n embeddings.' )
_snake_case : str = np.load(snake_case_ )
return voice_preset_dict
def lowerCamelCase__ ( self , snake_case_ = None ):
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset:
raise ValueError(F'Voice preset unrecognized, missing {key} as a key.' )
if not isinstance(voice_preset[key] , np.ndarray ):
raise ValueError(F'{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.' )
if len(voice_preset[key].shape ) != self.preset_shape[key]:
raise ValueError(F'{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.' )
def __call__( self , snake_case_=None , snake_case_=None , snake_case_="pt" , snake_case_=2_56 , snake_case_=False , snake_case_=True , snake_case_=False , **snake_case_ , ):
if voice_preset is not None and not isinstance(snake_case_ , snake_case_ ):
if (
isinstance(snake_case_ , snake_case_ )
and self.speaker_embeddings is not None
and voice_preset in self.speaker_embeddings
):
_snake_case : Any = self._load_voice_preset(snake_case_ )
else:
if isinstance(snake_case_ , snake_case_ ) and not voice_preset.endswith(".npz" ):
_snake_case : Any = voice_preset + ".npz"
_snake_case : List[Any] = np.load(snake_case_ )
if voice_preset is not None:
self._validate_voice_preset_dict(snake_case_ , **snake_case_ )
_snake_case : List[Any] = BatchFeature(data=snake_case_ , tensor_type=snake_case_ )
_snake_case : Tuple = self.tokenizer(
snake_case_ , return_tensors=snake_case_ , padding="max_length" , max_length=snake_case_ , return_attention_mask=snake_case_ , return_token_type_ids=snake_case_ , add_special_tokens=snake_case_ , **snake_case_ , )
if voice_preset is not None:
_snake_case : Union[str, Any] = voice_preset
return encoded_text
| 87 |
"""simple docstring"""
from typing import List, Optional
import numpy as np
from ...processing_utils import ProcessorMixin
from ...utils import to_numpy
class _UpperCAmelCase ( ProcessorMixin):
    """Wraps an Encodec feature extractor and a T5 tokenizer into one processor.

    Fixes: the two ``ProcessorMixin`` class attributes and all four method
    names had collided after anonymisation (only the last method survived and
    the internal ``self._decode_audio`` call pointed nowhere); duplicate
    parameter names made every signature a SyntaxError; the base class is
    restored from the ``ProcessorMixin`` import at the top of the file.
    """

    feature_extractor_class = "EncodecFeatureExtractor"
    tokenizer_class = ("T5Tokenizer", "T5TokenizerFast")

    def __init__( self , feature_extractor , tokenizer ):
        super().__init__(feature_extractor , tokenizer )
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def get_decoder_prompt_ids( self , task=None , language=None , no_timestamps=True ):
        return self.tokenizer.get_decoder_prompt_ids(task=task , language=language , no_timestamps=no_timestamps )

    def __call__( self , *args , **kwargs ):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args , **kwargs )
        audio = kwargs.pop("audio" , None )
        sampling_rate = kwargs.pop("sampling_rate" , None )
        text = kwargs.pop("text" , None )
        if len(args ) > 0:
            # First positional argument is treated as the audio input.
            audio = args[0]
            args = args[1:]
        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process." )
        if text is not None:
            inputs = self.tokenizer(text , **kwargs )
        if audio is not None:
            audio_inputs = self.feature_extractor(audio , *args , sampling_rate=sampling_rate , **kwargs )
        if audio is None:
            return inputs
        elif text is None:
            return audio_inputs
        else:
            # Merge the audio features into the tokenized inputs.
            inputs["input_values"] = audio_inputs["input_values"]
            if "padding_mask" in audio_inputs:
                inputs["padding_mask"] = audio_inputs["padding_mask"]
            return inputs

    def batch_decode( self , *args , **kwargs ):
        """Decode a batch of audio values (``audio`` kwarg or first positional
        argument) or fall back to the tokenizer's ``batch_decode``."""
        audio_values = kwargs.pop("audio" , None )
        padding_mask = kwargs.pop("padding_mask" , None )
        if len(args ) > 0:
            audio_values = args[0]
            args = args[1:]
        if audio_values is not None:
            return self._decode_audio(audio_values , padding_mask=padding_mask )
        else:
            return self.tokenizer.batch_decode(*args , **kwargs )

    def decode( self , *args , **kwargs ):
        return self.tokenizer.decode(*args , **kwargs )

    def _decode_audio( self , audio_values , padding_mask = None ):
        """Strip padding from decoded audio; returns a list of per-sample arrays."""
        audio_values = to_numpy(audio_values )
        bsz , channels , seq_len = audio_values.shape
        if padding_mask is None:
            return list(audio_values )
        padding_mask = to_numpy(padding_mask )
        # match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding**
        # token (so that the generated audio values are **not** treated as padded tokens)
        difference = seq_len - padding_mask.shape[-1]
        padding_value = 1 - self.feature_extractor.padding_value
        padding_mask = np.pad(padding_mask , ((0, 0), (0, difference)) , "constant" , constant_values=padding_value )
        audio_values = audio_values.tolist()
        for i in range(bsz ):
            sliced_audio = np.asarray(audio_values[i] )[
                padding_mask[i][None, :] != self.feature_extractor.padding_value
            ]
            audio_values[i] = sliced_audio.reshape(channels , -1 )
        return audio_values
| 87 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Submodule -> public-name mapping consumed lazily by `_LazyModule` below.
# Fixes: the optional-dependency branches previously rebound the whole
# variable to a bare list (destroying the structure) and the undefined name
# `_import_structure` was passed to `_LazyModule`; the canonical lazy-import
# pattern is restored, keeping the module-level name `_a` for the dict.
_a = {
    "configuration_whisper": ["WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP", "WhisperConfig", "WhisperOnnxConfig"],
    "feature_extraction_whisper": ["WhisperFeatureExtractor"],
    "processing_whisper": ["WhisperProcessor"],
    "tokenization_whisper": ["WhisperTokenizer"],
}
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _a["tokenization_whisper_fast"] = ["WhisperTokenizerFast"]
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _a["modeling_whisper"] = [
        "WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "WhisperForConditionalGeneration",
        "WhisperModel",
        "WhisperPreTrainedModel",
        "WhisperForAudioClassification",
    ]
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _a["modeling_tf_whisper"] = [
        "TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFWhisperForConditionalGeneration",
        "TFWhisperModel",
        "TFWhisperPreTrainedModel",
    ]
try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _a["modeling_flax_whisper"] = [
        "FlaxWhisperForConditionalGeneration",
        "FlaxWhisperModel",
        "FlaxWhisperPreTrainedModel",
        "FlaxWhisperForAudioClassification",
    ]
if TYPE_CHECKING:
    from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig
    from .feature_extraction_whisper import WhisperFeatureExtractor
    from .processing_whisper import WhisperProcessor
    from .tokenization_whisper import WhisperTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_whisper_fast import WhisperTokenizerFast
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_whisper import (
            WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
            WhisperForAudioClassification,
            WhisperForConditionalGeneration,
            WhisperModel,
            WhisperPreTrainedModel,
        )
    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_whisper import (
            TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFWhisperForConditionalGeneration,
            TFWhisperModel,
            TFWhisperPreTrainedModel,
        )
    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_whisper import (
            FlaxWhisperForAudioClassification,
            FlaxWhisperForConditionalGeneration,
            FlaxWhisperModel,
            FlaxWhisperPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy backends load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _a, module_spec=__spec__)
| 87 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
# Submodule -> public-name mapping consumed lazily by `_LazyModule` below.
# Fixes: the optional-dependency branches previously rebound the whole
# variable to bare lists and `_LazyModule` received the undefined name
# `_import_structure`; the canonical lazy-import pattern is restored.
_a = {"configuration_yolos": ["YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP", "YolosConfig", "YolosOnnxConfig"]}
try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _a["feature_extraction_yolos"] = ["YolosFeatureExtractor"]
    _a["image_processing_yolos"] = ["YolosImageProcessor"]
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _a["modeling_yolos"] = [
        "YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST",
        "YolosForObjectDetection",
        "YolosModel",
        "YolosPreTrainedModel",
    ]
if TYPE_CHECKING:
    from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_yolos import YolosFeatureExtractor
        from .image_processing_yolos import YolosImageProcessor
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_yolos import (
            YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
            YolosForObjectDetection,
            YolosModel,
            YolosPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy backends load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _a, module_spec=__spec__)
| 87 | 1 |
"""simple docstring"""
from math import atan, cos, radians, sin, tan
from .haversine_distance import haversine_distance
AXIS_A = 637_8137.0  # WGS-84 semi-major (equatorial) axis, in metres
AXIS_B = 635_6752.31_4245  # WGS-84 semi-minor (polar) axis, in metres
EQUATORIAL_RADIUS = 6_378_137  # metres; converts an arc length into a central angle


def a__ ( lat1: float , lon1: float , lat2: float , lon2: float ):
    """Return Lambert's ellipsoidal distance (in metres) between two
    (latitude, longitude) points given in degrees.

    Fixes: all four parameters previously shared one anonymised name (a
    SyntaxError), the three module constants clobbered a single variable, and
    every intermediate result was bound to one throwaway name, leaving the
    names used later (``b_lat1``, ``sigma``, ...) undefined.
    """
    # Equatorial flattening of the ellipsoid.
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    # Parametric latitudes
    # https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude
    b_lat1 = atan((1 - flattening) * tan(radians(lat1 ) ) )
    b_lat2 = atan((1 - flattening) * tan(radians(lat2 ) ) )
    # Compute central angle between two points
    # using haversine theta. sigma = haversine_distance / equatorial radius
    sigma = haversine_distance(lat1 , lon1 , lat2 , lon2 ) / EQUATORIAL_RADIUS
    # Intermediate P and Q values
    p_value = (b_lat1 + b_lat2) / 2
    q_value = (b_lat2 - b_lat1) / 2
    # Intermediate X value
    # X = (sigma - sin(sigma)) * sin^2Pcos^2Q / cos^2(sigma/2)
    x_numerator = (sin(p_value ) ** 2) * (cos(q_value ) ** 2)
    x_denominator = cos(sigma / 2 ) ** 2
    x_value = (sigma - sin(sigma )) * (x_numerator / x_denominator)
    # Intermediate Y value
    # Y = (sigma + sin(sigma)) * cos^2Psin^2Q / sin^2(sigma/2)
    y_numerator = (cos(p_value ) ** 2) * (sin(q_value ) ** 2)
    y_denominator = sin(sigma / 2 ) ** 2
    y_value = (sigma + sin(sigma )) * (y_numerator / y_denominator)
    return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value)))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 87 |
"""simple docstring"""
import numpy as np
import torch
from torch.utils.data import Dataset, IterableDataset
from ..utils.generic import ModelOutput
class PipelineDataset(Dataset):
    """Map-style dataset that applies ``process`` to each raw item on access.

    Fixes: the class name collided with every other class in the module, the
    three ``__init__`` parameters shared one anonymised name (a SyntaxError),
    and ``__getitem__`` indexed with an undefined name; the base class is
    restored from the ``Dataset`` import at the top of the file.
    """

    def __init__( self , dataset , process , params ):
        self.dataset = dataset
        self.process = process
        self.params = params

    def __len__( self ):
        return len(self.dataset )

    def __getitem__( self , i ):
        item = self.dataset[i]
        processed = self.process(item , **self.params )
        return processed
class PipelineIterator(IterableDataset):
    """Iterator that runs ``infer`` over each DataLoader item and can unroll a
    batched output into ``loader_batch_size`` batch-size-1 items.

    Fixes: the four ``__init__`` parameters shared one anonymised name (a
    SyntaxError), two methods shared one name while ``self.loader_batch_item()``
    calls pointed nowhere, ``__iter__`` returned ``self`` with no ``__next__``,
    and the dict-unbatching branch discarded every computed slice instead of
    storing it under its key.
    """

    def __init__( self , loader , infer , params , loader_batch_size=None ):
        self.loader = loader
        self.infer = infer
        self.params = params
        if loader_batch_size == 1:
            # Let's spare some time by deactivating altogether
            loader_batch_size = None
        self.loader_batch_size = loader_batch_size
        # Internal bookkeeping
        self._loader_batch_index = None
        self._loader_batch_data = None

    def __len__( self ):
        return len(self.loader )

    def __iter__( self ):
        self.iterator = iter(self.loader )
        return self

    def loader_batch_item( self ):
        """Return the element at ``self._loader_batch_index`` of the stored
        batch, reshaped to look like a batch of size 1."""
        if isinstance(self._loader_batch_data , torch.Tensor ):
            # Batch data is simple tensor, just fetch the slice
            result = self._loader_batch_data[self._loader_batch_index]
        else:
            # Batch data is assumed to be BaseModelOutput (or dict)
            loader_batched = {}
            for k, element in self._loader_batch_data.items():
                if isinstance(element , ModelOutput ):
                    # Convert ModelOutput to tuple first
                    element = element.to_tuple()
                    if isinstance(element[0] , torch.Tensor ):
                        loader_batched[k] = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element )
                    elif isinstance(element[0] , np.ndarray ):
                        loader_batched[k] = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element )
                    continue
                if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(element , tuple ):
                    # Those are stored as lists of tensors so need specific unbatching.
                    if isinstance(element[0] , torch.Tensor ):
                        loader_batched[k] = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element )
                    elif isinstance(element[0] , np.ndarray ):
                        loader_batched[k] = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element )
                    continue
                if element is None:
                    # This can happen for optional data that get passed around
                    loader_batched[k] = None
                elif isinstance(element[self._loader_batch_index] , torch.Tensor ):
                    # Take correct batch data, but make it looked like batch_size=1
                    # For compatibility with other methods within transformers
                    loader_batched[k] = element[self._loader_batch_index].unsqueeze(0 )
                elif isinstance(element[self._loader_batch_index] , np.ndarray ):
                    # Take correct batch data, but make it looked like batch_size=1
                    # For compatibility with other methods within transformers
                    loader_batched[k] = np.expand_dims(element[self._loader_batch_index] , 0 )
                else:
                    # This is typically a list, so no need to `unsqueeze`.
                    loader_batched[k] = element[self._loader_batch_index]
            # Recreate the element by reusing the original class to make it look
            # batch_size=1
            result = self._loader_batch_data.__class__(loader_batched )
        self._loader_batch_index += 1
        return result

    def __next__( self ):
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            # We are currently unrolling a batch so we just need to return
            # the current item within a batch
            return self.loader_batch_item()
        # We're out of items within a batch
        item = next(self.iterator )
        processed = self.infer(item , **self.params )
        # We now have a batch of "inferred things".
        if self.loader_batch_size is not None:
            # Try to infer the size of the batch
            if isinstance(processed , torch.Tensor ):
                first_tensor = processed
            else:
                key = list(processed.keys() )[0]
                first_tensor = processed[key]
            if isinstance(first_tensor , list ):
                observed_batch_size = len(first_tensor )
            else:
                observed_batch_size = first_tensor.shape[0]
            if 0 < observed_batch_size < self.loader_batch_size:
                # could be last batch so we can't unroll as many
                # elements.
                self.loader_batch_size = observed_batch_size
            # Setting internal index to unwrap the batch
            self._loader_batch_data = processed
            self._loader_batch_index = 0
            return self.loader_batch_item()
        else:
            # We're not unrolling batches
            return processed

    # Backward-compatible alias for the anonymised method name.
    lowerCamelCase__ = __next__
class PipelineChunkIterator(PipelineIterator):
    """Flattens a stream where ``infer`` yields a sub-iterator per input item.

    Fixes: duplicate anonymised parameter names in ``__init__`` (a
    SyntaxError), lost parameter names in the body, and the lack of a
    ``__next__`` even though ``__iter__`` returns ``self``; the base is the
    pipeline iterator class defined just above.
    """

    def __init__( self , loader , infer , params , loader_batch_size=None ):
        super().__init__(loader , infer , params )

    def __iter__( self ):
        self.iterator = iter(self.loader )
        self.subiterator = None
        return self

    def __next__( self ):
        if self.subiterator is None:
            self.subiterator = self.infer(next(self.iterator ) , **self.params )
        try:
            # Try to return next item
            processed = next(self.subiterator )
        except StopIteration:
            # When a preprocess iterator ends, we can start lookig at the next item
            # ChunkIterator will keep feeding until ALL elements of iterator
            # all have created their subiterator and have been iterating against.
            #
            # Another way to look at it, is we're basically flattening lists of lists
            # into a single list, but with generators
            self.subiterator = self.infer(next(self.iterator ) , **self.params )
            processed = next(self.subiterator )
        return processed

    # Backward-compatible alias for the anonymised method name.
    lowerCamelCase__ = __next__
class PipelinePackIterator(PipelineIterator):
    """Regroups flattened pipeline items back into their original ``process``
    groups using each item's ``is_last`` flag.

    Fixes: lost assignment targets in the body and the lack of a ``__next__``
    even though the inherited ``__iter__`` returns ``self``; the base is the
    pipeline iterator class defined above.
    """

    def __iter__( self ):
        self.iterator = iter(self.loader )
        return self

    def __next__( self ):
        # Extremely similar to PipelineIterator in its unpacking mechanism
        # BUT, we have an extra required item which is the presence of `is_last`
        # That is because everything is flattened by `PipelineChunkIterator` we
        # need to keep track of how to regroup here in the original `process`
        # boundaries so that `process` and `postprocess` see the same data.
        # This iterator accumulates items (possibly while unbatching) until it
        # its a `is_last` and then just passes it on to the caller.
        is_last = False
        accumulator = []
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            while self._loader_batch_index < self.loader_batch_size:
                item = self.loader_batch_item()
                is_last = item.pop("is_last" )
                accumulator.append(item )
                if is_last:
                    return accumulator
        while not is_last:
            processed = self.infer(next(self.iterator ) , **self.params )
            if self.loader_batch_size is not None:
                if isinstance(processed , torch.Tensor ):
                    first_tensor = processed
                else:
                    key = list(processed.keys() )[0]
                    first_tensor = processed[key]
                if isinstance(first_tensor , list ):
                    observed_batch_size = len(first_tensor )
                else:
                    observed_batch_size = first_tensor.shape[0]
                if 0 < observed_batch_size < self.loader_batch_size:
                    # could be last batch so we can't unroll as many
                    # elements.
                    self.loader_batch_size = observed_batch_size
                self._loader_batch_data = processed
                self._loader_batch_index = 0
                while self._loader_batch_index < self.loader_batch_size:
                    item = self.loader_batch_item()
                    is_last = item.pop("is_last" )
                    accumulator.append(item )
                    if is_last:
                        return accumulator
            else:
                item = processed
                is_last = item.pop("is_last" )
                accumulator.append(item )
        return accumulator

    # Backward-compatible alias for the anonymised method name.
    lowerCamelCase__ = __next__
class KeyDataset(Dataset):
    """Dataset view that returns ``item[key]`` for each underlying item.

    Fixes: the two ``__init__`` parameters shared one anonymised name (a
    SyntaxError) and ``__getitem__`` indexed with an undefined name; base
    restored from the ``Dataset`` import at the top of the file.
    """

    def __init__( self , dataset , key ):
        self.dataset = dataset
        self.key = key

    def __len__( self ):
        return len(self.dataset )

    def __getitem__( self , i ):
        return self.dataset[i][self.key]
class KeyPairDataset(Dataset):
    """Dataset view pairing two fields of each item as ``text``/``text_pair``.

    Fixes: the three ``__init__`` parameters shared one anonymised name (a
    SyntaxError) and the two key attributes had collapsed into one, so
    ``text_pair`` always repeated the first field; base restored from the
    ``Dataset`` import at the top of the file.
    """

    def __init__( self , dataset , keya , keyb ):
        self.dataset = dataset
        self.keya = keya
        self.keyb = keyb

    def __len__( self ):
        return len(self.dataset )

    def __getitem__( self , i ):
        return {"text": self.dataset[i][self.keya], "text_pair": self.dataset[i][self.keyb]}
| 87 | 1 |
"""simple docstring"""
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class TFConvBertModelTester:
    """Builds tiny ConvBERT configs/inputs and checks each TF model head.

    Fixes: the class name is restored per the ``TFConvBertModelTester(self)``
    call in the test class's setUp; every method shared one anonymised name
    (so only the last survived while the test class calls
    ``prepare_config_and_inputs`` / ``create_and_check_*``); and the method
    signatures reused a single parameter name, which is a SyntaxError.
    """

    def __init__(
        self ,
        parent ,
        batch_size=13 ,
        seq_length=7 ,
        is_training=True ,
        use_input_mask=True ,
        use_token_type_ids=True ,
        use_labels=True ,
        vocab_size=99 ,
        hidden_size=32 ,
        num_hidden_layers=2 ,
        num_attention_heads=4 ,
        intermediate_size=37 ,
        hidden_act="gelu" ,
        hidden_dropout_prob=0.1 ,
        attention_probs_dropout_prob=0.1 ,
        max_position_embeddings=5_12 ,
        type_vocab_size=16 ,
        type_sequence_label_size=2 ,
        initializer_range=0.02 ,
        num_labels=3 ,
        num_choices=4 ,
        scope=None ,
    ):
        self.parent = parent
        # NOTE: the values below are intentionally hard-coded (overriding the
        # defaults above), matching the original tester's behaviour.
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 3_84
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 5_12
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.embedding_size = 1_28
        self.head_ratio = 2
        self.conv_kernel_size = 9
        self.num_groups = 1
        self.scope = None

    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = ConvBertConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=True , )
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_model( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = TFConvBertModel(config=config )
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs )
        inputs = [input_ids, input_mask]
        result = model(inputs )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def create_and_check_for_masked_lm( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = TFConvBertForMaskedLM(config=config )
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

    def create_and_check_for_sequence_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_labels = self.num_labels
        model = TFConvBertForSequenceClassification(config=config )
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def create_and_check_for_multiple_choice( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_choices = self.num_choices
        model = TFConvBertForMultipleChoice(config=config )
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids , 1 ) , (1, self.num_choices, 1) )
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask , 1 ) , (1, self.num_choices, 1) )
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids , 1 ) , (1, self.num_choices, 1) )
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )

    def create_and_check_for_token_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_labels = self.num_labels
        model = TFConvBertForTokenClassification(config=config )
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )

    def create_and_check_for_question_answering( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = TFConvBertForQuestionAnswering(config=config )
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )

    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFConvBertModelTest ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase):
    """Standard tester suite for the TF ConvBERT models.

    Fixes: the mixin base classes had been anonymised to an undefined name
    (both mixins are imported at the top of the file); every test method
    shared one name, so unittest discovered none of them; the tester-suite
    class attributes collided; the original method and attribute names are
    restored from the call sites and the mixin contract.
    """

    all_model_classes = (
        (
            TFConvBertModel,
            TFConvBertForMaskedLM,
            TFConvBertForQuestionAnswering,
            TFConvBertForSequenceClassification,
            TFConvBertForTokenClassification,
            TFConvBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFConvBertModel,
            "fill-mask": TFConvBertForMaskedLM,
            "question-answering": TFConvBertForQuestionAnswering,
            "text-classification": TFConvBertForSequenceClassification,
            "token-classification": TFConvBertForTokenClassification,
            "zero-shot": TFConvBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    # NOTE(review): the three boolean flags below are reconstructed as the
    # canonical tester-suite switches — confirm against the mixin defaults.
    test_pruning = False
    test_head_masking = False
    test_onnx = False

    def setUp( self ):
        self.model_tester = TFConvBertModelTester(self )
        self.config_tester = ConfigTester(self , config_class=ConvBertConfig , hidden_size=37 )

    def test_config( self ):
        self.config_tester.run_common_tests()

    def test_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )

    def test_for_masked_lm( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs )

    def test_for_multiple_choice( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs )

    def test_for_question_answering( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs )

    def test_for_sequence_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs )

    def test_for_token_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs )

    @slow
    def test_saved_model_creation_extended( self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = True
        if hasattr(config , "use_cache" ):
            config.use_cache = True
        encoder_seq_length = getattr(self.model_tester , "encoder_seq_length" , self.model_tester.seq_length )
        encoder_key_length = getattr(self.model_tester , "key_length" , encoder_seq_length )
        for model_class in self.all_model_classes:
            class_inputs_dict = self._prepare_for_class(inputs_dict , model_class )
            model = model_class(config )
            num_out = len(model(class_inputs_dict ) )
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname , saved_model=True )
                saved_model_dir = os.path.join(tmpdirname , "saved_model" , "1" )
                model = tf.keras.models.load_model(saved_model_dir )
                outputs = model(class_inputs_dict )
                if self.is_encoder_decoder:
                    output_hidden_states = outputs["encoder_hidden_states"]
                    output_attentions = outputs["encoder_attentions"]
                else:
                    output_hidden_states = outputs["hidden_states"]
                    output_attentions = outputs["attentions"]
                self.assertEqual(len(outputs ) , num_out )
                expected_num_layers = getattr(
                    self.model_tester , "expected_num_hidden_layers" , self.model_tester.num_hidden_layers + 1 )
                self.assertEqual(len(output_hidden_states ) , expected_num_layers )
                self.assertListEqual(
                    list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
                self.assertEqual(len(output_attentions ) , self.model_tester.num_hidden_layers )
                self.assertListEqual(
                    list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )

    @slow
    def test_model_from_pretrained( self ):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base" )
        self.assertIsNotNone(model )

    def test_attention_outputs( self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        decoder_seq_length = getattr(self.model_tester , "decoder_seq_length" , self.model_tester.seq_length )
        encoder_seq_length = getattr(self.model_tester , "encoder_seq_length" , self.model_tester.seq_length )
        decoder_key_length = getattr(self.model_tester , "key_length" , decoder_seq_length )
        encoder_key_length = getattr(self.model_tester , "key_length" , encoder_seq_length )

        def check_decoder_attentions_output(outputs ):
            out_len = len(outputs )
            self.assertEqual(out_len % 2 , 0 )
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )

        def check_encoder_attentions_output(outputs ):
            attentions = [
                t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
            ]
            self.assertEqual(len(attentions ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = False
            model = model_class(config )
            outputs = model(self._prepare_for_class(inputs_dict , model_class ) )
            out_len = len(outputs )
            self.assertEqual(config.output_hidden_states , False )
            check_encoder_attentions_output(outputs )
            if self.is_encoder_decoder:
                model = model_class(config )
                outputs = model(self._prepare_for_class(inputs_dict , model_class ) )
                self.assertEqual(config.output_hidden_states , False )
                check_decoder_attentions_output(outputs )
            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config )
            outputs = model(self._prepare_for_class(inputs_dict , model_class ) )
            self.assertEqual(config.output_hidden_states , False )
            check_encoder_attentions_output(outputs )
            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = True
            model = model_class(config )
            outputs = model(self._prepare_for_class(inputs_dict , model_class ) )
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(outputs ) )
            self.assertEqual(model.config.output_hidden_states , True )
            check_encoder_attentions_output(outputs )
@require_tf
class TFConvBertModelIntegrationTest ( unittest.TestCase):
    """Slow integration check against the pretrained YituTech/conv-bert-base weights.

    Fixes: the class name previously collided with (and shadowed) the main
    test class, and the single test method's anonymised name was invisible to
    unittest discovery; canonical names are restored.
    """

    @slow
    def test_inference_masked_lm( self ):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base" )
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]] )
        output = model(input_ids )[0]
        expected_shape = [1, 6, 7_68]
        self.assertEqual(output.shape , expected_shape )
        expected_slice = tf.constant(
            [
                [
                    [-0.03475493, -0.4686034, -0.30638832],
                    [0.22637248, -0.26988646, -0.7423424],
                    [0.10324868, -0.45013508, -0.58280784],
                ]
            ] )
        tf.debugging.assert_near(output[:, :3, :3] , expected_slice , atol=1E-4 )
| 87 |
"""simple docstring"""
def a__(a: int) -> int:
    """Return the position of the most significant set bit of ``a``.

    Equivalent to ``a.bit_length()``: 0 -> 0, 1 -> 1, 8 -> 4, 255 -> 8.

    Raises:
        TypeError: if ``a`` is not an ``int``.
        ValueError: if ``a`` is negative (a negative value would otherwise
            loop forever, since ``>>`` on a negative int never reaches 0).
    """
    # The previous revision called isinstance(a, a) — comparing the value
    # against itself instead of against `int` — and counted into names
    # (`position`, `number`) that were never defined.
    if not isinstance(a, int):
        raise TypeError("Input value must be an 'int' type")
    if a < 0:
        raise ValueError("Input value must be a non-negative integer")
    position = 0
    number = a
    while number:
        position += 1
        number >>= 1
    return position


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 87 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_a : Optional[int] = logging.get_logger(__name__)
_a : str = {"""openai-gpt""": """https://huggingface.co/openai-gpt/resolve/main/config.json"""}
class _UpperCAmelCase(PretrainedConfig):
    """Configuration for an OpenAI GPT style model (mirror of `OpenAIGPTConfig`).

    Stores the hyper-parameters used to instantiate the model; the defaults
    match the original ``openai-gpt`` checkpoint.
    """

    # `PretrainedConfig` looks these up under exactly these names. The
    # previous revision inherited from the undefined name `_snake_case` and
    # bound both attributes to a single `__lowercase` name, so the second
    # assignment silently clobbered the first.
    model_type = "openai-gpt"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=4_04_78,  # BPE vocabulary size
        n_positions=5_12,  # maximum sequence length
        n_embd=7_68,  # hidden size
        n_layer=12,
        n_head=12,
        afn="gelu",  # activation function name
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1E-5,
        initializer_range=0.02,
        summary_type="cls_index",
        summary_use_proj=True,
        summary_activation=None,
        summary_proj_to_labels=True,
        summary_first_dropout=0.1,
        **kwargs,
    ):
        # The previous revision declared every parameter as `snake_case_`,
        # which is a SyntaxError (duplicate argument names); the canonical
        # parameter names/order of `OpenAIGPTConfig` are restored.
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.afn = afn
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_first_dropout = summary_first_dropout
        self.summary_proj_to_labels = summary_proj_to_labels
        super().__init__(**kwargs)
| 87 |
"""simple docstring"""
from __future__ import annotations
import requests
# Whitelist of Reddit listing fields that may be requested via `wanted_data`;
# anything outside this set makes the fetcher below raise ValueError.
_a : List[str] = set(
    """approved_at_utc approved_by author_flair_background_color
author_flair_css_class author_flair_richtext author_flair_template_id author_fullname
author_premium can_mod_post category clicked content_categories created_utc downs
edited gilded gildings hidden hide_score is_created_from_ads_ui is_meta
is_original_content is_reddit_media_domain is_video link_flair_css_class
link_flair_richtext link_flair_text link_flair_text_color media_embed mod_reason_title
name permalink pwls quarantine saved score secure_media secure_media_embed selftext
subreddit subreddit_name_prefixed subreddit_type thumbnail title top_awarded_type
total_awards_received ups upvote_ratio url user_reports""".split()
)
def a__(subreddit: str, limit: int = 1, age: str = "new", wanted_data: list | None = None):
    """Fetch up to ``limit`` posts from ``r/<subreddit>``.

    Args:
        subreddit: subreddit name without the ``r/`` prefix.
        limit: number of posts to return.
        age: listing to query (``new``, ``hot``, ...).
        wanted_data: optional subset of fields from ``valid_terms``; when
            empty, the raw child objects are returned.

    Raises:
        ValueError: if ``wanted_data`` contains a field not in ``valid_terms``.
        requests.HTTPError: when Reddit answers 429 (rate limited).
    """
    # The previous revision named every parameter `a` (a SyntaxError —
    # duplicate argument names); the names the body reads are restored.
    wanted_data = wanted_data or []
    if invalid_search_terms := ", ".join(sorted(set(wanted_data) - valid_terms)):
        msg = f'Invalid search term: {invalid_search_terms}'
        raise ValueError(msg)
    response = requests.get(
        f'https://reddit.com/r/{subreddit}/{age}.json?limit={limit}',
        headers={"User-agent": "A random string"},
    )
    if response.status_code == 429:
        raise requests.HTTPError
    data = response.json()
    if not wanted_data:
        return {id_: data["data"]["children"][id_] for id_ in range(limit)}
    data_dict = {}
    for id_ in range(limit):
        data_dict[id_] = {
            item: data["data"]["children"][id_]["data"][item] for item in wanted_data
        }
    return data_dict


# Name used by the `__main__` demo below.
get_subreddit_data = a__
if __name__ == "__main__":
    # If you get Error 429, that means you are rate limited.Try after some time
    # NOTE(review): `get_subreddit_data` is not bound under that name in this
    # file (the fetcher above is named `a__`) — confirm the intended binding.
    print(get_subreddit_data("""learnpython""", wanted_data=["""title""", """url""", """selftext"""]))
| 87 | 1 |
"""simple docstring"""
# XXX: we want transformers master here - in the absense of conftest manipulating sys.path:
# hack it in for now:
import sys
from pathlib import Path
_a : Optional[int] = Path(__file__).resolve().parents[3] / """src"""
sys.path.insert(1, str(git_repo_path))
import dataclasses # noqa
import io # noqa
import itertools # noqa
import json # noqa
import os # noqa
import unittest # noqa
from copy import deepcopy # noqa
from parameterized import parameterized # noqa
from transformers import TrainingArguments, is_torch_available # noqa
from transformers.deepspeed import is_deepspeed_available # noqa
from transformers.file_utils import WEIGHTS_NAME # noqa
from transformers.testing_utils import ( # noqa
CaptureLogger,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
mockenv_context,
require_deepspeed,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
from transformers.trainer_utils import set_seed # noqa
set_seed(42)
_a : Union[str, Any] = {"""base""": """patrickvonplaten/wav2vec2_tiny_random""", """robust""": """patrickvonplaten/wav2vec2_tiny_random_robust"""}
_a : str = """zero2"""
_a : Tuple = """zero3"""
_a : int = [ZEROa, ZEROa]
def a__(func, param_num, param):
    """``name_func`` for ``parameterized.expand``: ``<test name>_<safe args>``.

    ``param_num`` is required by the ``name_func`` signature but unused.
    """
    # The previous revision named all three parameters `a` (a SyntaxError)
    # while the body read `func` and `param`; the real names are restored.
    param_based_name = parameterized.to_safe_name("_".join(str(x) for x in param.args))
    return f'{func.__name__}_{param_based_name}'
# Cartesian-product of zero stages with models to test
_a : int = list(itertools.product(stages, models.keys()))
@slow
@require_deepspeed
@require_torch_gpu
class _UpperCAmelCase ( _snake_case):
    # Matrix of (ZeRO stage x tiny wav2vec2 model) smoke tests for the ASR
    # example under DeepSpeed.
    # NOTE(review): the base class `_snake_case` and the `snake_case_` values
    # handed to `parameterized.expand(...)` are not defined at module level in
    # this file; they look like mangled stand-ins for `TestCasePlus`, `params`
    # and the name-func helper above — confirm before running. The duplicated
    # `snake_case_` parameter names below are a SyntaxError as written.
    @parameterized.expand(snake_case_ , name_func=snake_case_ )
    def lowerCamelCase__ ( self , snake_case_ , snake_case_ ):
        # single-GPU fp16 run over every (stage, model) pair
        self.run_and_check(
            stage=snake_case_ , model=snake_case_ , distributed=snake_case_ , fpaa=snake_case_ , )

    @require_torch_multi_gpu
    @parameterized.expand(snake_case_ , name_func=snake_case_ )
    def lowerCamelCase__ ( self , snake_case_ , snake_case_ ):
        # multi-GPU variant of the same sweep
        self.run_and_check(
            stage=snake_case_ , model=snake_case_ , distributed=snake_case_ , fpaa=snake_case_ , )

    @parameterized.expand(snake_case_ , name_func=snake_case_ )
    def lowerCamelCase__ ( self , snake_case_ , snake_case_ ):
        self.run_and_check(
            stage=snake_case_ , model=snake_case_ , distributed=snake_case_ , fpaa=snake_case_ , )

    @require_torch_multi_gpu
    @parameterized.expand(snake_case_ , name_func=snake_case_ )
    def lowerCamelCase__ ( self , snake_case_ , snake_case_ ):
        self.run_and_check(
            stage=snake_case_ , model=snake_case_ , distributed=snake_case_ , fpaa=snake_case_ , )

    def lowerCamelCase__ ( self , snake_case_ ):
        # XXX: run_asr is premature and doesn't save any results
        # so all we check for now is that the process didn't fail
        pass

    def lowerCamelCase__ ( self , snake_case_ , snake_case_ , snake_case_ = 10 , snake_case_ = True , snake_case_ = True , snake_case_ = True , ):
        # Run the trainer for one epoch, then apply `do_checks` to the output
        # dir. NOTE(review): `models[model]` / `output_dir` read names that
        # are never bound here (mangled locals) — confirm against the
        # original test module.
        _snake_case : List[str] = models[model]
        _snake_case : Any = self.run_trainer(
            stage=snake_case_ , model_name=snake_case_ , eval_steps=snake_case_ , num_train_epochs=1 , distributed=snake_case_ , fpaa=snake_case_ , )
        self.do_checks(snake_case_ )
        return output_dir

    def lowerCamelCase__ ( self , snake_case_ , snake_case_ , snake_case_ = 10 , snake_case_ = 1 , snake_case_ = True , snake_case_ = True , ):
        # Launch examples/research_projects/wav2vec2/run_asr.py under the
        # deepspeed launcher with the per-stage ds_config json.
        _snake_case : str = self.get_auto_remove_tmp_dir("./xxx" , after=snake_case_ )
        _snake_case : Optional[int] = F'\n --model_name_or_path {model_name}\n --dataset_name hf-internal-testing/librispeech_asr_dummy\n --dataset_config_name clean\n --train_split_name validation\n --validation_split_name validation\n --output_dir {output_dir}\n --num_train_epochs {str(snake_case_ )}\n --per_device_train_batch_size 2\n --per_device_eval_batch_size 2\n --evaluation_strategy steps\n --learning_rate 5e-4\n --warmup_steps 8\n --orthography timit\n --preprocessing_num_workers 1\n --group_by_length\n --freeze_feature_extractor\n --report_to none\n --save_steps 0\n --eval_steps {eval_steps}\n --report_to none\n '.split()
        if fpaa:
            args.extend(["--fp16"] )
        # currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true,
        # hence the separate config files
        _snake_case : str = F'--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json'.split()
        _snake_case : Optional[int] = [F'{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py']
        _snake_case : List[str] = self.get_launcher(snake_case_ )
        _snake_case : int = launcher + script + args + ds_args
        # keep for quick debug
        # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
        execute_subprocess_async(snake_case_ , env=self.get_env() )
        return output_dir

    def lowerCamelCase__ ( self , snake_case_=False ):
        # 1. explicitly set --num_nodes=1 just in case these tests end up run on a multi-node setup
        # - it won't be able to handle that
        # 2. for now testing with just 2 gpus max (since some quality tests may give different
        # results with mode gpus because we use very little data)
        _snake_case : Optional[Any] = min(2 , get_gpu_count() ) if distributed else 1
        return F'deepspeed --num_nodes 1 --num_gpus {num_gpus}'.split()
| 87 |
"""simple docstring"""
from __future__ import annotations
from numpy import array, cos, cross, floataa, radians, sin
from numpy.typing import NDArray
def a__(magnitude: float, angle: float, radian_mode: bool = False) -> list[float]:
    """Resolve a force of ``magnitude`` at ``angle`` into ``[Fx, Fy]``.

    ``angle`` is in degrees unless ``radian_mode`` is True.
    """
    # The previous revision named all parameters `a` (duplicate argument
    # names, a SyntaxError) while the body read `magnitude` / `radian_mode`.
    if radian_mode:
        return [magnitude * cos(angle), magnitude * sin(angle)]
    return [magnitude * cos(radians(angle)), magnitude * sin(radians(angle))]


# Name used by the self-test block below.
polar_force = a__
def a__ ( a : NDArray[floataa] , a : NDArray[floataa] , a : float = 10**-1 ):
"""simple docstring"""
_snake_case : NDArray[floataa] = cross(a , a )
_snake_case : float = sum(a )
return abs(a ) < eps
if __name__ == "__main__":
    # Test to check if it works
    # NOTE(review): the arrays are bound to `_a` but the asserts read
    # `forces` / `location`, and `polar_force` / `in_static_equilibrium` are
    # not bound under those names here (both helpers above are named `a__`)
    # — confirm the intended bindings before running.
    _a : Tuple = array(
        [
            polar_force(718.4, 180 - 30),
            polar_force(879.54, 45),
            polar_force(100, -90),
        ]
    )
    _a : NDArray[floataa] = array([[0, 0], [0, 0], [0, 0]])
    assert in_static_equilibrium(forces, location)
    # Problem 1 in image_data/2D_problems.jpg
    _a : List[Any] = array(
        [
            polar_force(30 * 9.81, 15),
            polar_force(215, 180 - 45),
            polar_force(264, 90 - 30),
        ]
    )
    _a : List[Any] = array([[0, 0], [0, 0], [0, 0]])
    assert in_static_equilibrium(forces, location)
    # Problem in image_data/2D_problems_1.jpg
    _a : List[str] = array([[0, -2_000], [0, -1_200], [0, 15_600], [0, -12_400]])
    _a : Optional[int] = array([[0, 0], [6, 0], [10, 0], [12, 0]])
    assert in_static_equilibrium(forces, location)
    import doctest

    doctest.testmod()
| 87 | 1 |
"""simple docstring"""
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class _UpperCAmelCase(pl.LightningModule):
    """Lightning wrapper: a Longformer backbone plus a 2-way QA span head."""

    def __init__(self, model):
        super().__init__()
        # The previous revision bound these to throwaway locals
        # (`_snake_case`), so `self.model` / `self.num_labels` /
        # `self.qa_outputs` never existed even though they are read below
        # and by the converter; the attribute assignments are restored.
        self.model = model
        self.num_labels = 2
        self.qa_outputs = nn.Linear(self.model.config.hidden_size, self.num_labels)

    def lowerCamelCase__(self):
        # Intentionally a no-op: only the weights are needed for conversion.
        pass
def a__(model_identifier: str, longformer_question_answering_ckpt_path: str, pytorch_dump_folder_path: str):
    """Convert a PyTorch Lightning Longformer-QA checkpoint to a HF checkpoint.

    Loads the Lightning state dict on CPU, copies the backbone and QA-head
    weights into a fresh ``LongformerForQuestionAnswering``, and saves the
    result under ``pytorch_dump_folder_path``.
    """
    # The previous revision bound every intermediate to `_snake_case` while
    # reading `lightning_model` / `longformer_for_qa`; restored.
    longformer = LongformerModel.from_pretrained(model_identifier)
    # `_UpperCAmelCase` is the Lightning wrapper class defined above.
    lightning_model = _UpperCAmelCase(longformer)
    ckpt = torch.load(longformer_question_answering_ckpt_path, map_location=torch.device("cpu"))
    lightning_model.load_state_dict(ckpt["state_dict"])
    # init longformer question answering model
    longformer_for_qa = LongformerForQuestionAnswering.from_pretrained(model_identifier)
    # transfer weights
    longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict())
    longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict())
    longformer_for_qa.eval()
    # save model
    longformer_for_qa.save_pretrained(pytorch_dump_folder_path)
    print(f'Conversion successful. Model saved under {pytorch_dump_folder_path}')


# Name used by the argparse entry point at the bottom of this file.
convert_longformer_qa_checkpoint_to_pytorch = a__
if __name__ == "__main__":
    # CLI entry point for the checkpoint conversion.
    # NOTE(review): the parser/args are bound to `_a` but read back as
    # `parser` / `args`, and `convert_longformer_qa_checkpoint_to_pytorch`
    # is not bound under that name in this file (the converter above is
    # named `a__`) — confirm the intended bindings before running.
    _a : List[Any] = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--longformer_model""",
        default=None,
        type=str,
        required=True,
        help="""model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.""",
    )
    parser.add_argument(
        """--longformer_question_answering_ckpt_path""",
        default=None,
        type=str,
        required=True,
        help="""Path the official PyTorch Lightning Checkpoint.""",
    )
    parser.add_argument(
        """--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
    )
    _a : Tuple = parser.parse_args()
    convert_longformer_qa_checkpoint_to_pytorch(
        args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
    )
| 87 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_a : Optional[int] = logging.get_logger(__name__)
_a : str = {"""openai-gpt""": """https://huggingface.co/openai-gpt/resolve/main/config.json"""}
class _UpperCAmelCase(PretrainedConfig):
    """Configuration for an OpenAI GPT style model (mirror of `OpenAIGPTConfig`).

    Stores the hyper-parameters used to instantiate the model; the defaults
    match the original ``openai-gpt`` checkpoint.
    """

    # `PretrainedConfig` looks these up under exactly these names. The
    # previous revision inherited from the undefined name `_snake_case` and
    # bound both attributes to a single `__lowercase` name, so the second
    # assignment silently clobbered the first.
    model_type = "openai-gpt"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=4_04_78,  # BPE vocabulary size
        n_positions=5_12,  # maximum sequence length
        n_embd=7_68,  # hidden size
        n_layer=12,
        n_head=12,
        afn="gelu",  # activation function name
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1E-5,
        initializer_range=0.02,
        summary_type="cls_index",
        summary_use_proj=True,
        summary_activation=None,
        summary_proj_to_labels=True,
        summary_first_dropout=0.1,
        **kwargs,
    ):
        # The previous revision declared every parameter as `snake_case_`,
        # which is a SyntaxError (duplicate argument names); the canonical
        # parameter names/order of `OpenAIGPTConfig` are restored.
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.afn = afn
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_first_dropout = summary_first_dropout
        self.summary_proj_to_labels = summary_proj_to_labels
        super().__init__(**kwargs)
| 87 | 1 |
"""simple docstring"""
import timeit
import numpy as np
import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD
def a__(func):
    """Decorator: the wrapped callable returns its wall-clock run time.

    ``func``'s own return value is discarded; the wrapper returns the elapsed
    seconds as a float. The wrapped function's ``__name__`` is preserved so
    benchmark reports stay readable.
    """
    # The previous revision declared `wrapper(*a, **a)` (duplicate argument
    # names, a SyntaxError) and read the never-bound name `starttime`.

    def wrapper(*args, **kwargs):
        starttime = timeit.default_timer()
        func(*args, **kwargs)
        delta = timeit.default_timer() - starttime
        return delta

    wrapper.__name__ = func.__name__
    return wrapper
def a__(features: dict, num_examples=100, seq_shapes=None):
    """Create ``num_examples`` rows of random data matching ``features``.

    Returns a list of ``(index, example_dict)`` tuples. ``seq_shapes`` maps a
    Sequence-typed column name to the shape of the random array generated for
    it.
    """
    # The previous revision bound every intermediate to `_snake_case` while
    # reading `example` / `dummy_data`; coherent locals restored.
    dummy_data = []
    seq_shapes = seq_shapes or {}
    for i in range(num_examples):
        example = {}
        for k, v in features.items():
            if isinstance(v, _ArrayXD):
                example[k] = np.random.rand(*v.shape).astype(v.dtype)
            elif isinstance(v, datasets.Value):
                if v.dtype == "string":
                    example[k] = "The small grey turtle was surprisingly fast when challenged."
                else:
                    example[k] = np.random.randint(10, size=1).astype(v.dtype).item()
            elif isinstance(v, datasets.Sequence):
                # unwrap nested Sequence features down to the leaf dtype
                while isinstance(v, datasets.Sequence):
                    v = v.feature
                shape = seq_shapes[k]
                example[k] = np.random.rand(*shape).astype(v.dtype)
        dummy_data.append((i, example))
    return dummy_data


# Name used by `generate_example_dataset` below.
generate_examples = a__
def a__(dataset_path, features, num_examples=100, seq_shapes=None):
    """Write ``num_examples`` random rows to an Arrow file and load it back.

    Raises:
        ValueError: if the writer reports a different row count than requested.
    """
    # The previous revision named all parameters `a` (duplicate argument
    # names, a SyntaxError); the names the body reads are restored.
    dummy_data = generate_examples(features, num_examples=num_examples, seq_shapes=seq_shapes)
    with ArrowWriter(features=features, path=dataset_path) as writer:
        for key, record in dummy_data:
            example = features.encode_example(record)
            writer.write(example)
        num_final_examples, num_bytes = writer.finalize()
    if not num_final_examples == num_examples:
        raise ValueError(
            f'Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}.' )
    dataset = datasets.Dataset.from_file(filename=dataset_path, info=datasets.DatasetInfo(features=features))
    return dataset
| 87 |
"""simple docstring"""
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision.transforms import functional as F
from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection
from transformers.utils import logging
logging.set_verbosity_info()
_a : Tuple = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
# NOTE(review): the list is created as `_a` but appended to as `rename_keys`;
# these look like mangled stand-ins for one and the same name — confirm the
# intended binding before running this module.
_a : int = []
for i in range(6):
    # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
    rename_keys.append(
        (f'transformer.encoder.layers.{i}.self_attn.out_proj.weight', f'encoder.layers.{i}.self_attn.out_proj.weight')
    )
    rename_keys.append(
        (f'transformer.encoder.layers.{i}.self_attn.out_proj.bias', f'encoder.layers.{i}.self_attn.out_proj.bias')
    )
    rename_keys.append((f'transformer.encoder.layers.{i}.linear1.weight', f'encoder.layers.{i}.fc1.weight'))
    rename_keys.append((f'transformer.encoder.layers.{i}.linear1.bias', f'encoder.layers.{i}.fc1.bias'))
    rename_keys.append((f'transformer.encoder.layers.{i}.linear2.weight', f'encoder.layers.{i}.fc2.weight'))
    rename_keys.append((f'transformer.encoder.layers.{i}.linear2.bias', f'encoder.layers.{i}.fc2.bias'))
    rename_keys.append(
        (f'transformer.encoder.layers.{i}.norm1.weight', f'encoder.layers.{i}.self_attn_layer_norm.weight')
    )
    rename_keys.append((f'transformer.encoder.layers.{i}.norm1.bias', f'encoder.layers.{i}.self_attn_layer_norm.bias'))
    rename_keys.append((f'transformer.encoder.layers.{i}.norm2.weight', f'encoder.layers.{i}.final_layer_norm.weight'))
    rename_keys.append((f'transformer.encoder.layers.{i}.norm2.bias', f'encoder.layers.{i}.final_layer_norm.bias'))
    # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
    rename_keys.append(
        (f'transformer.decoder.layers.{i}.self_attn.out_proj.weight', f'decoder.layers.{i}.self_attn.out_proj.weight')
    )
    rename_keys.append(
        (f'transformer.decoder.layers.{i}.self_attn.out_proj.bias', f'decoder.layers.{i}.self_attn.out_proj.bias')
    )
    rename_keys.append(
        (
            f'transformer.decoder.layers.{i}.multihead_attn.out_proj.weight',
            f'decoder.layers.{i}.encoder_attn.out_proj.weight',
        )
    )
    rename_keys.append(
        (
            f'transformer.decoder.layers.{i}.multihead_attn.out_proj.bias',
            f'decoder.layers.{i}.encoder_attn.out_proj.bias',
        )
    )
    rename_keys.append((f'transformer.decoder.layers.{i}.linear1.weight', f'decoder.layers.{i}.fc1.weight'))
    rename_keys.append((f'transformer.decoder.layers.{i}.linear1.bias', f'decoder.layers.{i}.fc1.bias'))
    rename_keys.append((f'transformer.decoder.layers.{i}.linear2.weight', f'decoder.layers.{i}.fc2.weight'))
    rename_keys.append((f'transformer.decoder.layers.{i}.linear2.bias', f'decoder.layers.{i}.fc2.bias'))
    rename_keys.append(
        (f'transformer.decoder.layers.{i}.norm1.weight', f'decoder.layers.{i}.self_attn_layer_norm.weight')
    )
    rename_keys.append((f'transformer.decoder.layers.{i}.norm1.bias', f'decoder.layers.{i}.self_attn_layer_norm.bias'))
    rename_keys.append(
        (f'transformer.decoder.layers.{i}.norm2.weight', f'decoder.layers.{i}.encoder_attn_layer_norm.weight')
    )
    rename_keys.append(
        (f'transformer.decoder.layers.{i}.norm2.bias', f'decoder.layers.{i}.encoder_attn_layer_norm.bias')
    )
    rename_keys.append((f'transformer.decoder.layers.{i}.norm3.weight', f'decoder.layers.{i}.final_layer_norm.weight'))
    rename_keys.append((f'transformer.decoder.layers.{i}.norm3.bias', f'decoder.layers.{i}.final_layer_norm.bias'))
# convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads
rename_keys.extend(
    [
        ("""input_proj.weight""", """input_projection.weight"""),
        ("""input_proj.bias""", """input_projection.bias"""),
        ("""query_embed.weight""", """query_position_embeddings.weight"""),
        ("""transformer.encoder.norm.weight""", """encoder.layernorm.weight"""),
        ("""transformer.encoder.norm.bias""", """encoder.layernorm.bias"""),
        ("""transformer.decoder.norm.weight""", """decoder.layernorm.weight"""),
        ("""transformer.decoder.norm.bias""", """decoder.layernorm.bias"""),
        ("""class_embed.weight""", """class_labels_classifier.weight"""),
        ("""class_embed.bias""", """class_labels_classifier.bias"""),
        ("""bbox_embed.layers.0.weight""", """bbox_predictor.layers.0.weight"""),
        ("""bbox_embed.layers.0.bias""", """bbox_predictor.layers.0.bias"""),
        ("""bbox_embed.layers.1.weight""", """bbox_predictor.layers.1.weight"""),
        ("""bbox_embed.layers.1.bias""", """bbox_predictor.layers.1.bias"""),
        ("""bbox_embed.layers.2.weight""", """bbox_predictor.layers.2.weight"""),
        ("""bbox_embed.layers.2.bias""", """bbox_predictor.layers.2.bias"""),
    ]
)
def a__(state_dict, old, new):
    """Move the entry stored under ``old`` to ``new`` (in place)."""
    # The previous revision named all parameters `a` (duplicate argument
    # names, a SyntaxError) and dropped the write-back of `val`.
    val = state_dict.pop(old)
    state_dict[new] = val


# Name used by the conversion routine below.
rename_key = a__
def a__(state_dict):
    """Return a copy of ``state_dict`` with timm backbone keys renamed.

    ``backbone.0.body`` prefixes become ``backbone.conv_encoder.model``;
    every other key is kept verbatim, preserving insertion order.
    """
    # The previous revision bound the renamed key to a throwaway local
    # instead of inserting it into `new_state_dict`; restored.
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict


# Name used by the conversion routine below.
rename_backbone_keys = a__
def a__(state_dict):
    """Split fused attention in_proj weights into separate q/k/v projections.

    DETR-style checkpoints store each attention layer's query/key/value
    projection as one fused ``(768, d)`` weight / ``(768,)`` bias; the HF
    model expects separate 256-row ``q_proj`` / ``k_proj`` / ``v_proj``
    tensors. The ``state_dict`` is rewritten in place.
    """
    # The previous revision bound every slice to a throwaway `_snake_case`
    # local instead of writing it back under the new key; restored.
    prefix = ""
    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f'{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight')
        in_proj_bias = state_dict.pop(f'{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias')
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f'encoder.layers.{i}.self_attn.q_proj.weight'] = in_proj_weight[:256, :]
        state_dict[f'encoder.layers.{i}.self_attn.q_proj.bias'] = in_proj_bias[:256]
        state_dict[f'encoder.layers.{i}.self_attn.k_proj.weight'] = in_proj_weight[256:512, :]
        state_dict[f'encoder.layers.{i}.self_attn.k_proj.bias'] = in_proj_bias[256:512]
        state_dict[f'encoder.layers.{i}.self_attn.v_proj.weight'] = in_proj_weight[-256:, :]
        state_dict[f'encoder.layers.{i}.self_attn.v_proj.bias'] = in_proj_bias[-256:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f'{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight')
        in_proj_bias = state_dict.pop(f'{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias')
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f'decoder.layers.{i}.self_attn.q_proj.weight'] = in_proj_weight[:256, :]
        state_dict[f'decoder.layers.{i}.self_attn.q_proj.bias'] = in_proj_bias[:256]
        state_dict[f'decoder.layers.{i}.self_attn.k_proj.weight'] = in_proj_weight[256:512, :]
        state_dict[f'decoder.layers.{i}.self_attn.k_proj.bias'] = in_proj_bias[256:512]
        state_dict[f'decoder.layers.{i}.self_attn.v_proj.weight'] = in_proj_weight[-256:, :]
        state_dict[f'decoder.layers.{i}.self_attn.v_proj.bias'] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            f'{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight')
        in_proj_bias_cross_attn = state_dict.pop(f'{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias')
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[f'decoder.layers.{i}.encoder_attn.q_proj.weight'] = in_proj_weight_cross_attn[:256, :]
        state_dict[f'decoder.layers.{i}.encoder_attn.q_proj.bias'] = in_proj_bias_cross_attn[:256]
        state_dict[f'decoder.layers.{i}.encoder_attn.k_proj.weight'] = in_proj_weight_cross_attn[256:512, :]
        state_dict[f'decoder.layers.{i}.encoder_attn.k_proj.bias'] = in_proj_bias_cross_attn[256:512]
        state_dict[f'decoder.layers.{i}.encoder_attn.v_proj.weight'] = in_proj_weight_cross_attn[-256:, :]
        state_dict[f'decoder.layers.{i}.encoder_attn.v_proj.bias'] = in_proj_bias_cross_attn[-256:]


# Name used by the conversion routine below.
read_in_q_k_v = a__
def a__(image, checkpoint_url: str):
    """Resize ``image`` so its longest side is 800 px (detection checkpoints)
    or 1000 px (structure-recognition checkpoints), preserving aspect ratio.
    """
    # The previous revision named both parameters `a` (duplicate argument
    # names, a SyntaxError) and left `width`/`height` unbound; restored.
    width, height = image.size
    current_max_size = max(width, height)
    target_max_size = 800 if "detection" in checkpoint_url else 1_000
    scale = target_max_size / current_max_size
    resized_image = image.resize((int(round(scale * width)), int(round(scale * height))))
    return resized_image


# Name used by the conversion routine below.
resize = a__
def a__(image):
    """Convert a PIL image to a tensor and apply ImageNet mean/std normalization."""
    # The previous revision normalized the original argument instead of the
    # tensor produced by `F.to_tensor`; the chain is restored.
    image = F.to_tensor(image)
    image = F.normalize(image, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    return image


# Name used by the conversion routine below.
normalize = a__
@torch.no_grad()
def a__(checkpoint_url: str, pytorch_dump_folder_path, push_to_hub):
    """Convert an original Table Transformer checkpoint to the HF format.

    Downloads the checkpoint from ``checkpoint_url``, rewrites its state dict
    (key renames, backbone prefix, fused q/k/v split), verifies the converted
    model against pinned logits/boxes on a sample image, then optionally saves
    to ``pytorch_dump_folder_path`` and/or pushes to the hub.
    """
    # The previous revision named all parameters `a` (duplicate argument
    # names, a SyntaxError) and bound every intermediate to `_snake_case`;
    # coherent names are restored throughout.
    logger.info("Converting model...")
    # load original state dict
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    # rename keys
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "model."
    for key in state_dict.copy().keys():
        if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
            val = state_dict.pop(key)
            state_dict[prefix + key] = val
    # create HuggingFace model and load state dict
    config = TableTransformerConfig(
        backbone="resnet18", mask_loss_coefficient=1, dice_loss_coefficient=1, ce_loss_coefficient=1, bbox_loss_coefficient=5, giou_loss_coefficient=2, eos_coefficient=0.4, class_cost=1, bbox_cost=5, giou_cost=2, )
    if "detection" in checkpoint_url:
        config.num_queries = 15
        config.num_labels = 2
        idalabel = {0: "table", 1: "table rotated"}
        config.idalabel = idalabel
        config.labelaid = {v: k for k, v in idalabel.items()}
    else:
        config.num_queries = 125
        config.num_labels = 6
        idalabel = {
            0: "table",
            1: "table column",
            2: "table row",
            3: "table column header",
            4: "table projected row header",
            5: "table spanning cell",
        }
        config.idalabel = idalabel
        config.labelaid = {v: k for k, v in idalabel.items()}
    image_processor = DetrImageProcessor(
        format="coco_detection", max_size=800 if "detection" in checkpoint_url else 1_000)
    model = TableTransformerForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    # verify our conversion
    filename = "example_pdf.png" if "detection" in checkpoint_url else "example_table.png"
    file_path = hf_hub_download(repo_id="nielsr/example-pdf", repo_type="dataset", filename=filename)
    image = Image.open(file_path).convert("RGB")
    pixel_values = normalize(resize(image, checkpoint_url)).unsqueeze(0)
    outputs = model(pixel_values)
    if "detection" in checkpoint_url:
        expected_shape = (1, 15, 3)
        expected_logits = torch.tensor(
            [[-6.7897, -16.9985, 6.7937], [-8.0186, -22.2192, 6.9677], [-7.3117, -21.0708, 7.4055]])
        expected_boxes = torch.tensor([[0.4867, 0.1767, 0.6732], [0.6718, 0.4479, 0.3830], [0.4716, 0.1760, 0.6364]])
    else:
        expected_shape = (1, 125, 7)
        expected_logits = torch.tensor(
            [[-18.1430, -8.3214, 4.8274], [-18.4685, -7.1361, -4.2667], [-26.3693, -9.3429, -4.9962]])
        expected_boxes = torch.tensor([[0.4983, 0.5595, 0.9440], [0.4916, 0.6315, 0.5954], [0.6108, 0.8637, 0.1135]])
    assert outputs.logits.shape == expected_shape
    assert torch.allclose(outputs.logits[0, :3, :3], expected_logits, atol=1e-4)
    assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes, atol=1e-4)
    print("Looks ok!")
    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(f'Saving PyTorch model and image processor to {pytorch_dump_folder_path}...')
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        image_processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        # Push model to HF hub
        logger.info("Pushing model to the hub...")
        model_name = (
            "microsoft/table-transformer-detection"
            if "detection" in checkpoint_url
            else "microsoft/table-transformer-structure-recognition"
        )
        model.push_to_hub(model_name)
        image_processor.push_to_hub(model_name)


# Name used by the argparse entry point at the bottom of this file.
convert_table_transformer_checkpoint = a__
if __name__ == "__main__":
    # CLI entry point for the checkpoint conversion.
    # NOTE(review): the parser/args are bound to `_a` but read back as
    # `parser` / `args`, and `convert_table_transformer_checkpoint` is not
    # bound under that name in this file (the converter above is named
    # `a__`) — confirm the intended bindings before running.
    _a : Tuple = argparse.ArgumentParser()
    parser.add_argument(
        """--checkpoint_url""",
        default="""https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth""",
        type=str,
        choices=[
            """https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth""",
            """https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth""",
        ],
        help="""URL of the Table Transformer checkpoint you'd like to convert.""",
    )
    parser.add_argument(
        """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
    )
    parser.add_argument(
        """--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
    )
    _a : Any = parser.parse_args()
    convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 87 | 1 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.