import gc
import unittest

from parameterized import parameterized

from diffusers import FlaxUNet2DConditionModel
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow

if is_flax_available():
    import jax
    import jax.numpy as jnp


@slow
@require_flax
class FlaxUNet2DConditionModelIntegrationTests(unittest.TestCase):
    def get_file_format(self, seed, shape):
        return f"gaussian_noise_s={seed}_shape={'_'.join([str(s) for s in shape])}.npy"

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()

    def get_latents(self, seed=0, shape=(4, 4, 64, 64), fp16=False):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        image = jnp.array(load_hf_numpy(self.get_file_format(seed, shape)), dtype=dtype)
        return image

    def get_unet_model(self, fp16=False, model_id="CompVis/stable-diffusion-v1-4"):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        revision = "bf16" if fp16 else None

        model, params = FlaxUNet2DConditionModel.from_pretrained(
            model_id, subfolder="unet", dtype=dtype, revision=revision
        )
        return model, params

    def get_encoder_hidden_states(self, seed=0, shape=(4, 77, 768), fp16=False):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        hidden_states = jnp.array(load_hf_numpy(self.get_file_format(seed, shape)), dtype=dtype)
        return hidden_states

    @parameterized.expand(
        [
            # fmt: off
            [83, 4, [-0.2323, -0.1304, 0.0813, -0.3093, -0.0919, -0.1571, -0.1125, -0.5806]],
            [17, 0.55, [-0.0831, -0.2443, 0.0901, -0.0919, 0.3396, 0.0103, -0.3743, 0.0701]],
            [8, 0.89, [-0.4863, 0.0859, 0.0875, -0.1658, 0.9199, -0.0114, 0.4839, 0.4639]],
            [3, 1000, [-0.5649, 0.2402, -0.5518, 0.1248, 1.1328, -0.2443, -0.0325, -1.0078]],
            # fmt: on
        ]
    )
    def test_compvis_sd_v1_4_flax_vs_torch_fp16(self, seed, timestep, expected_slice):
        model, params = self.get_unet_model(model_id="CompVis/stable-diffusion-v1-4", fp16=True)
        latents = self.get_latents(seed, fp16=True)
        encoder_hidden_states = self.get_encoder_hidden_states(seed, fp16=True)

        sample = model.apply(
            {"params": params},
            latents,
            jnp.array(timestep, dtype=jnp.int32),
            encoder_hidden_states=encoder_hidden_states,
        ).sample

        assert sample.shape == latents.shape

        output_slice = jnp.asarray(jax.device_get(sample[-1, -2:, -2:, :2].flatten()), dtype=jnp.float32)
        expected_output_slice = jnp.array(expected_slice, dtype=jnp.float32)

        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
        assert jnp.allclose(output_slice, expected_output_slice, atol=1e-2)

    @parameterized.expand(
        [
            # fmt: off
            [83, 4, [0.1514, 0.0807, 0.1624, 0.1016, -0.1896, 0.0263, 0.0677, 0.2310]],
            [17, 0.55, [0.1164, -0.0216, 0.0170, 0.1589, -0.3120, 0.1005, -0.0581, -0.1458]],
            [8, 0.89, [-0.1758, -0.0169, 0.1004, -0.1411, 0.1312, 0.1103, -0.1996, 0.2139]],
            [3, 1000, [0.1214, 0.0352, -0.0731, -0.1562, -0.0994, -0.0906, -0.2340, -0.0539]],
            # fmt: on
        ]
    )
    def test_stabilityai_sd_v2_flax_vs_torch_fp16(self, seed, timestep, expected_slice):
        model, params = self.get_unet_model(model_id="stabilityai/stable-diffusion-2", fp16=True)
        latents = self.get_latents(seed, shape=(4, 4, 96, 96), fp16=True)
        encoder_hidden_states = self.get_encoder_hidden_states(seed, shape=(4, 77, 1024), fp16=True)

        sample = model.apply(
            {"params": params},
            latents,
            jnp.array(timestep, dtype=jnp.int32),
            encoder_hidden_states=encoder_hidden_states,
        ).sample

        assert sample.shape == latents.shape

        output_slice = jnp.asarray(jax.device_get(sample[-1, -2:, -2:, :2].flatten()), dtype=jnp.float32)
        expected_output_slice = jnp.array(expected_slice, dtype=jnp.float32)

        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
        assert jnp.allclose(output_slice, expected_output_slice, atol=1e-2)
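
# The same call pattern works outside the test harness. A minimal standalone sketch (assumes
# Hub access and a Flax-capable accelerator; shapes follow the SD v1-4 UNet tested above):
#
#     model, params = FlaxUNet2DConditionModel.from_pretrained(
#         "CompVis/stable-diffusion-v1-4", subfolder="unet", dtype=jnp.bfloat16, revision="bf16"
#     )
#     latents = jnp.zeros((1, 4, 64, 64), dtype=jnp.bfloat16)
#     text_emb = jnp.zeros((1, 77, 768), dtype=jnp.bfloat16)
#     noise_pred = model.apply(
#         {"params": params}, latents, jnp.array(1, dtype=jnp.int32), encoder_hidden_states=text_emb
#     ).sample  # same shape as `latents`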
"""simple docstring"""
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Image
from .base import TaskTemplate
@dataclass(frozen=lowerCAmelCase__ )
class _UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
__UpperCAmelCase : str =field(default="""image-classification""" ,metadata={"""include_in_asdict_even_if_is_default""": True} )
__UpperCAmelCase : ClassVar[Features] =Features({"""image""": Image()} )
__UpperCAmelCase : ClassVar[Features] =Features({"""labels""": ClassLabel} )
__UpperCAmelCase : str ="image"
__UpperCAmelCase : str ="labels"
def snake_case ( self , __a ):
if self.label_column not in features:
raise ValueError(f"Column {self.label_column} is not present in features." )
if not isinstance(features[self.label_column] , __a ):
raise ValueError(f"Column {self.label_column} is not a ClassLabel." )
__lowerCAmelCase = copy.deepcopy(self )
__lowerCAmelCase = self.label_schema.copy()
__lowerCAmelCase = features[self.label_column]
__lowerCAmelCase = label_schema
return task_template
@property
def snake_case ( self ):
return {
self.image_column: "image",
self.label_column: "labels",
}
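
# A minimal usage sketch (label names are illustrative, not part of the module): aligning the
# template's generic ClassLabel with a dataset's concrete label feature.
#
#     features = Features({"image": Image(), "labels": ClassLabel(names=["cat", "dog"])})
#     task = ImageClassification()
#     aligned = task.align_with_features(features)
#     # aligned.label_schema["labels"] now carries the two concrete class names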
from itertools import product

from cv2 import COLOR_BGR2GRAY, cvtColor, imread, imshow, waitKey
from numpy import dot, exp, mgrid, pi, ravel, square, uint8, zeros


def gen_gaussian_kernel(k_size, sigma):
    """Build a k_size x k_size Gaussian kernel: g(x, y) = 1 / (2*pi*sigma) * exp(-(x^2 + y^2) / (2*sigma^2))."""
    center = k_size // 2
    x, y = mgrid[0 - center : k_size - center, 0 - center : k_size - center]
    g = 1 / (2 * pi * sigma) * exp(-(square(x) + square(y)) / (2 * square(sigma)))
    return g
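
# Sanity check (illustrative, not part of the original module): the kernel is symmetric and
# peaks at the center; note it is not normalized to sum to 1, so filtered intensities are
# scaled by a constant factor.
#
#     kernel = gen_gaussian_kernel(3, 1)
#     assert kernel.shape == (3, 3)
#     assert kernel[1, 1] == kernel.max()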

def gaussian_filter(image, k_size, sigma):
    height, width = image.shape[0], image.shape[1]
    # dst image height and width
    dst_height = height - k_size + 1
    dst_width = width - k_size + 1

    # im2col: turn each k_size*k_size window into a row and stack all rows
    image_array = zeros((dst_height * dst_width, k_size * k_size))
    row = 0
    for i, j in product(range(dst_height), range(dst_width)):
        window = ravel(image[i : i + k_size, j : j + k_size])
        image_array[row, :] = window
        row += 1

    # turn the kernel into shape (k*k,)
    gaussian_kernel = gen_gaussian_kernel(k_size, sigma)
    filter_array = ravel(gaussian_kernel)

    # reshape and get the dst image
    dst = dot(image_array, filter_array).reshape(dst_height, dst_width).astype(uint8)

    return dst
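
# Worked shape example (illustrative): for a 512x512 grayscale image and k_size=3 the output is
# (512 - 3 + 1) x (512 - 3 + 1) = 510x510, and the im2col matrix is (510*510, 9), so the whole
# convolution reduces to a single (260100, 9) x (9,) matrix-vector product.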

if __name__ == "__main__":
    # read original image
    img = imread(r"../image_data/lena.jpg")
    # turn image into gray scale value
    gray = cvtColor(img, COLOR_BGR2GRAY)

    # get values with two different mask sizes
    gaussian3x3 = gaussian_filter(gray, 3, sigma=1)
    gaussian5x5 = gaussian_filter(gray, 5, sigma=0.8)

    # show result images
    imshow("gaussian filter with 3x3 mask", gaussian3x3)
    imshow("gaussian filter with 5x5 mask", gaussian5x5)
    waitKey()
import copy
import inspect
import unittest

import numpy as np
from huggingface_hub import hf_hub_download

from transformers import VideoMAEConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin

if is_torch_available():
    import torch
    from torch import nn

    from transformers import (
        MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
        VideoMAEForPreTraining,
        VideoMAEForVideoClassification,
        VideoMAEModel,
    )
    from transformers.models.videomae.modeling_videomae import VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST

if is_vision_available():
    from transformers import VideoMAEImageProcessor


class VideoMAEModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=10,
        num_channels=3,
        patch_size=2,
        tubelet_size=2,
        num_frames=2,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        mask_ratio=0.9,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.tubelet_size = tubelet_size
        self.num_frames = num_frames
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope

        # in VideoMAE, the number of tokens equals num_frames/tubelet_size * num_patches per frame
        self.num_patches_per_frame = (image_size // patch_size) ** 2
        self.seq_length = (num_frames // tubelet_size) * self.num_patches_per_frame
        # use this variable to define bool_masked_pos
        self.num_masks = int(mask_ratio * self.seq_length)
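        # Worked example with the defaults above (illustrative): each 10x10 frame yields
        # (10 // 2) ** 2 = 25 patches, and 2 frames with tubelet_size=2 give a single temporal
        # slice, so seq_length = 1 * 25 = 25 and num_masks = int(0.9 * 25) = 22, leaving
        # 25 - 22 = 3 visible tokens for the VideoMAEForPreTraining checks below.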

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor(
            [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size]
        )

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return VideoMAEConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            num_frames=self.num_frames,
            tubelet_size=self.tubelet_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = VideoMAEModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_pretraining(self, config, pixel_values, labels):
        model = VideoMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        # important: each video needs to have the same number of masked patches
        # hence we define a single mask, which we then repeat for each example in the batch
        mask = torch.ones((self.num_masks,))
        mask = torch.cat([mask, torch.zeros(self.seq_length - mask.size(0))])
        bool_masked_pos = mask.expand(self.batch_size, -1).bool()

        result = model(pixel_values, bool_masked_pos)
        # model only returns predictions for masked patches
        num_masked_patches = mask.sum().item()
        decoder_num_labels = 3 * self.tubelet_size * self.patch_size**2
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_masked_patches, decoder_num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class VideoMAEModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (VideoMAEModel, VideoMAEForPreTraining, VideoMAEForVideoClassification) if is_torch_available() else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": VideoMAEModel, "video-classification": VideoMAEForVideoClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = VideoMAEModelTester(self)
        self.config_tester = ConfigTester(self, config_class=VideoMAEConfig, has_text_modality=False, hidden_size=37)

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = copy.deepcopy(inputs_dict)

        if model_class == VideoMAEForPreTraining:
            # important: each video needs to have the same number of masked patches
            # hence we define a single mask, which we then repeat for each example in the batch
            mask = torch.ones((self.model_tester.num_masks,))
            mask = torch.cat([mask, torch.zeros(self.model_tester.seq_length - mask.size(0))])
            bool_masked_pos = mask.expand(self.model_tester.batch_size, -1).bool()
            inputs_dict["bool_masked_pos"] = bool_masked_pos.to(torch_device)

        if return_labels:
            if model_class in [
                *get_values(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING),
            ]:
                inputs_dict["labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )

        return inputs_dict

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="VideoMAE does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = VideoMAEModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_attention_outputs(self):
        if not self.has_attentions:
            pass
        else:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True

            for model_class in self.all_model_classes:
                num_visible_patches = self.model_tester.seq_length - self.model_tester.num_masks
                seq_len = (
                    num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
                )

                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = False
                config.return_dict = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

                # check that output_attentions also work using config
                del inputs_dict["output_attentions"]
                config.output_attentions = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

                self.assertListEqual(
                    list(attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, seq_len, seq_len],
                )
                out_len = len(outputs)

                # Check attention is always last and order is fine
                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(out_len + 1, len(outputs))

                self_attentions = outputs.attentions
                self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
                self.assertListEqual(
                    list(self_attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, seq_len, seq_len],
                )

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states
            expected_num_layers = self.model_tester.num_hidden_layers + 1
            self.assertEqual(len(hidden_states), expected_num_layers)

            num_visible_patches = self.model_tester.seq_length - self.model_tester.num_masks
            seq_length = num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length

            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [seq_length, self.model_tester.hidden_size],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass


def prepare_video():
    file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy", repo_type="dataset"
    )
    video = np.load(file)
    return list(video)


@require_torch
@require_vision
class VideoMAEModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        # logits were tested with a different mean and std, so we use the same here
        return (
            VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_for_video_classification(self):
        model = VideoMAEForVideoClassification.from_pretrained("MCG-NJU/videomae-base-finetuned-kinetics").to(
            torch_device
        )

        image_processor = self.default_image_processor
        video = prepare_video()
        inputs = image_processor(video, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 400))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([0.3669, -0.0688, -0.2421]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_for_pretraining(self):
        model = VideoMAEForPreTraining.from_pretrained("MCG-NJU/videomae-base-short").to(torch_device)

        image_processor = self.default_image_processor
        video = prepare_video()
        inputs = image_processor(video, return_tensors="pt").to(torch_device)

        # add boolean mask, indicating which patches to mask
        local_path = hf_hub_download(repo_id="hf-internal-testing/bool-masked-pos", filename="bool_masked_pos.pt")
        inputs["bool_masked_pos"] = torch.load(local_path)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor(
            [[0.7994, 0.9612, 0.8508], [0.7401, 0.8958, 0.8302], [0.5862, 0.7468, 0.7325]], device=torch_device
        )
        self.assertEqual(outputs.logits.shape, expected_shape)
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice, atol=1e-4))

        # verify the loss (`config.norm_pix_loss` = `True`)
        expected_loss = torch.tensor([0.5142], device=torch_device)
        self.assertTrue(torch.allclose(outputs.loss, expected_loss, atol=1e-4))

        # verify the loss (`config.norm_pix_loss` = `False`)
        model = VideoMAEForPreTraining.from_pretrained("MCG-NJU/videomae-base-short", norm_pix_loss=False).to(
            torch_device
        )

        with torch.no_grad():
            outputs = model(**inputs)

        expected_loss = torch.tensor([0.6469], device=torch_device)
        self.assertTrue(torch.allclose(outputs.loss, expected_loss, atol=1e-4))
from argparse import ArgumentParser

from . import BaseTransformersCLICommand


def download_command_factory(args):
    return DownloadCommand(args.model, args.cache_dir, args.force, args.trust_remote_code)


class DownloadCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        download_parser = parser.add_parser("download")
        download_parser.add_argument(
            "--cache-dir", type=str, default=None, help="Path to location to store the models"
        )
        download_parser.add_argument(
            "--force", action="store_true", help="Force the model to be downloaded even if already in cache-dir"
        )
        download_parser.add_argument(
            "--trust-remote-code",
            action="store_true",
            help=(
                "Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only"
                " if you've reviewed the code as it will execute on your local machine"
            ),
        )
        download_parser.add_argument("model", type=str, help="Name of the model to download")
        download_parser.set_defaults(func=download_command_factory)

    def __init__(self, model: str, cache: str, force: bool, trust_remote_code: bool):
        self._model = model
        self._cache = cache
        self._force = force
        self._trust_remote_code = trust_remote_code

    def run(self):
        from ..models.auto import AutoModel, AutoTokenizer

        AutoModel.from_pretrained(
            self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code
        )
        AutoTokenizer.from_pretrained(
            self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code
        )
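
# Usage sketch (the model name is illustrative): once registered with the transformers CLI, the
# subcommand above is invoked as, e.g.:
#
#     transformers-cli download bert-base-uncased --cache-dir ./models --force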
# pinned dependency table: package name -> pip requirement specifier
deps = {
    "Pillow": "Pillow",
    "accelerate": "accelerate>=0.11.0",
    "compel": "compel==0.1.8",
    "black": "black~=23.1",
    "datasets": "datasets",
    "filelock": "filelock",
    "flax": "flax>=0.4.1",
    "hf-doc-builder": "hf-doc-builder>=0.3.0",
    "huggingface-hub": "huggingface-hub>=0.13.2",
    "requests-mock": "requests-mock==1.10.0",
    "importlib_metadata": "importlib_metadata",
    "invisible-watermark": "invisible-watermark",
    "isort": "isort>=5.5.4",
    "jax": "jax>=0.2.8,!=0.3.2",
    "jaxlib": "jaxlib>=0.1.65",
    "Jinja2": "Jinja2",
    "k-diffusion": "k-diffusion>=0.0.12",
    "torchsde": "torchsde",
    "note_seq": "note_seq",
    "librosa": "librosa",
    "numpy": "numpy",
    "omegaconf": "omegaconf",
    "parameterized": "parameterized",
    "protobuf": "protobuf>=3.20.3,<4",
    "pytest": "pytest",
    "pytest-timeout": "pytest-timeout",
    "pytest-xdist": "pytest-xdist",
    "ruff": "ruff>=0.0.241",
    "safetensors": "safetensors",
    "sentencepiece": "sentencepiece>=0.1.91,!=0.1.92",
    "scipy": "scipy",
    "onnx": "onnx",
    "regex": "regex!=2019.12.17",
    "requests": "requests",
    "tensorboard": "tensorboard",
    "torch": "torch>=1.4",
    "torchvision": "torchvision",
    "transformers": "transformers>=4.25.1",
    "urllib3": "urllib3<=2.0.0",
}
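
# A minimal consumption sketch (`deps_list` is hypothetical here, though setup scripts commonly
# define a helper like it): turning selected entries into pip requirement strings.
#
#     def deps_list(*pkgs):
#         return [deps[pkg] for pkg in pkgs]
#
#     deps_list("protobuf", "regex")  # ['protobuf>=3.20.3,<4', 'regex!=2019.12.17']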
"""simple docstring"""
from collections import defaultdict
from math import gcd
def lowercase ( A_ = 1_500_000 )-> int:
'''simple docstring'''
a : defaultdict = defaultdict(A_ )
a : List[str] = 2
while 2 * euclid_m * (euclid_m + 1) <= limit:
for euclid_n in range((euclid_m % 2) + 1 , A_ , 2 ):
if gcd(A_ , A_ ) > 1:
continue
a : Optional[int] = 2 * euclid_m * (euclid_m + euclid_n)
for perimeter in range(A_ , limit + 1 , A_ ):
frequencies[perimeter] += 1
euclid_m += 1
return sum(1 for frequency in frequencies.values() if frequency == 1 )
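
# Worked example of the enumeration above (illustrative): m=2, n=1 is coprime with opposite
# parity (enforced by range((m % 2) + 1, m, 2)), and Euclid's formula a = m^2 - n^2, b = 2mn,
# c = m^2 + n^2 gives the (3, 4, 5) triangle with primitive perimeter 2*m*(m + n) = 12; the
# inner loop then credits every multiple 12, 24, 36, ... so non-primitive triangles such as
# (6, 8, 10) are counted as well.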
if __name__ == "__main__":
    print(f"{solution() = }")
"""simple docstring"""
# HF Trainer benchmarking tool
#
# This tool can be used to run and compare multiple dimensions of the HF Trainers args.
#
# It then prints a report once in github format with all the information that needs to be shared
# with others and second time in a console-friendly format, so it's easier to use for tuning things up.
#
# The main idea is:
#
# ./trainer-benchmark.py --base-cmd '<cmd args that don't change>' \
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' \
# --target-metric-key train_samples_per_second
#
# The variations can be any command line argument that you want to compare and not just dtype as in
# the example.
#
# --variations allows you to compare variations in multiple dimensions.
#
# as the first dimension has 2 options and the second 3 in our example, this will run the trainer 6
# times adding one of:
#
# 1. --tf32 0 --fp16 0
# 2. --tf32 0 --fp16 1
# 3. --tf32 0 --bf16 1
# 4. --tf32 1 --fp16 0
# 5. --tf32 1 --fp16 1
# 6. --tf32 1 --bf16 1
#
# and print the results. This is just a cartesian product - and more than 2 dimensions can be used.
#
# If you want to rely on defaults, this:
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1'
# is identical to this:
# --variations '--tf32 0|--tf32 1' '|--fp16|--bf16'
#
# the leading empty variation in the 2nd dimension is a valid variation.
#
# So here we get the following 6 variations:
#
# 1. --tf32 0
# 2. --tf32 0 --fp16
# 3. --tf32 0 --bf16
# 4. --tf32 1
# 5. --tf32 1 --fp16
# 6. --tf32 1 --bf16
#
# In this particular case we don't know what the default tf32 setting is (as it's normally
# pytorch-version dependent). That's why it's best to do an explicit setting of each variation:
# `--tf32 0|--tf32 1`
#
# Here is a full example of a train:
#
# CUDA_VISIBLE_DEVICES=0 python ./scripts/benchmark/trainer-benchmark.py \
# --base-cmd \
# ' examples/pytorch/translation/run_translation.py --model_name_or_path t5-small \
# --output_dir output_dir --do_train --label_smoothing 0.1 --logging_strategy no \
# --save_strategy no --per_device_train_batch_size 32 --max_source_length 512 \
# --max_target_length 512 --num_train_epochs 1 --overwrite_output_dir \
# --source_lang en --target_lang ro --dataset_name wmt16 --dataset_config "ro-en" \
# --source_prefix "translate English to Romanian: " --warmup_steps 50 \
# --max_train_samples 20000 --dataloader_num_workers 2 ' \
# --target-metric-key train_samples_per_second --repeat-times 1 --variations \
# '|--fp16|--bf16' '--tf32 0|--tf32 1' --report-metric-keys train_loss \
# --repeat-times 1 --base-variation '--tf32 0'
#
# and here is a possible output:
#
#
# | Variation | Train | Diff | Train |
# | | samples | % | loss |
# | | per | | |
# | | second | | |
# |:----------------|----------:|-------:|--------:|
# | --tf32 0 | 285.11 | 0 | 2.51 |
# | --tf32 1 | 342.09 | 20 | 2.51 |
# | --fp16 --tf32 0 | 423.49 | 49 | 2.51 |
# | --fp16 --tf32 1 | 423.13 | 48 | 2.51 |
# | --bf16 --tf32 0 | 416.80 | 46 | 2.52 |
# | --bf16 --tf32 1 | 415.87 | 46 | 2.52 |
#
#
# So you can quickly compare the different outcomes.
#
# Typically running each experiment once is enough, but if the environment is unstable you can
# re-run each multiple times, e.g., 3 using --repeat-times 3 and it will report the averaged results.
#
# By default it'll use the lowest result as the baseline to use as 100% and then compare the rest to
# it as can be seen from the table above, but you can also specify which combination is the one to use as
# the baseline, e.g., to change to another entry use: --base-variation '--tf32 1 --fp16 0'
#
# --target-metric-key is there to tell the program which metrics to compare - the different metric keys are
# inside output_dir/all_results.json. e.g., to measure eval performance instead of train use:
# --target-metric-key eval_samples_per_second
# but of course you will need to adjust the --base-cmd value in the example to perform evaluation as
# well (as currently it doesn't)
#
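# A minimal standalone sketch (illustrative, not part of this script) of how the variation
# dimensions expand into the cartesian product of benchmark runs described above:
#
#     import itertools
#     dims = [["--tf32 0", "--tf32 1"], ["", "--fp16", "--bf16"]]
#     variations = [" ".join(v).strip() for v in itertools.product(*dims)]
#     # ['--tf32 0', '--tf32 0 --fp16', '--tf32 0 --bf16',
#     #  '--tf32 1', '--tf32 1 --fp16', '--tf32 1 --bf16']
#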
import argparse
import datetime
import io
import itertools
import json
import math
import os
import platform
import re
import shlex
import subprocess
import sys
from pathlib import Path
from statistics import fmean
import pandas as pd
import torch
from tqdm import tqdm
import transformers
nan = float("nan")


class Tee:
    """Tee print's output into a file as well as stdout. Usage: sys.stdout = Tee(filename)"""

    def __init__(self, filename):
        self.stdout = sys.stdout
        self.file = open(filename, "a")

    def __getattr__(self, attr):
        return getattr(self.stdout, attr)

    def write(self, msg):
        self.stdout.write(msg)
        # strip tqdm codes
        self.file.write(re.sub(r"^.*\r", "", msg, 0, re.M))


def get_original_command(max_width=80, full_python_path=False):
    """Reconstruct the original command line, wrapped with shell line continuations."""
    cmd = []

    # deal with critical env vars
    env_keys = ["CUDA_VISIBLE_DEVICES"]
    for key in env_keys:
        val = os.environ.get(key, None)
        if val is not None:
            cmd.append(f"{key}={val}")

    # python executable (not always needed if the script is executable)
    python = sys.executable if full_python_path else sys.executable.split("/")[-1]
    cmd.append(python)

    # now the normal args
    cmd += list(map(shlex.quote, sys.argv))

    # split up into up to MAX_WIDTH lines with shell multi-line escapes
    lines = []
    current_line = ""
    while len(cmd) > 0:
        current_line += f"{cmd.pop(0)} "
        if len(cmd) == 0 or len(current_line) + len(cmd[0]) + 1 > max_width - 1:
            lines.append(current_line)
            current_line = ""
    return "\\\n".join(lines)


def get_base_command(args, output_dir):
    # unwrap multi-line input
    args.base_cmd = re.sub(r"[\\\n]+", " ", args.base_cmd)

    # remove --output_dir if any and set our own
    args.base_cmd = re.sub(r"--output_dir\s+[^\s]+", "", args.base_cmd)
    args.base_cmd += f" --output_dir {output_dir}"

    # ensure we have --overwrite_output_dir
    args.base_cmd = re.sub(r"--overwrite_output_dir\s+", "", args.base_cmd)
    args.base_cmd += " --overwrite_output_dir"

    return [sys.executable] + shlex.split(args.base_cmd)


def process_run_single(id, cmd, variation, output_dir, target_metric_key, metric_keys, verbose):
    # debugging shortcut: flip `if 0` to `if 1` to skip the actual run and return random metrics
    if 0:
        import random
        from time import sleep

        sleep(0)
        return dict(
            {k: random.uniform(0, 100) for k in metric_keys},
            **{target_metric_key: random.choice([nan, 10.31, 100.2, 55.6666, 222.22222222])},
        )

    result = subprocess.run(cmd, capture_output=True, text=True)

    if verbose:
        print("STDOUT", result.stdout)
        print("STDERR", result.stderr)

    # save the streams
    prefix = variation.replace(" ", "-")
    with open(Path(output_dir) / f"log.{prefix}.stdout.txt", "w") as f:
        f.write(result.stdout)
    with open(Path(output_dir) / f"log.{prefix}.stderr.txt", "w") as f:
        f.write(result.stderr)

    if result.returncode != 0:
        if verbose:
            print("failed")
        return {target_metric_key: nan}

    with io.open(f"{output_dir}/all_results.json", "r", encoding="utf-8") as f:
        metrics = json.load(f)

    # filter out just the keys we want
    return {k: v for k, v in metrics.items() if k in metric_keys}


def process_run(
    id,
    cmd,
    variation_key,
    variation,
    longest_variation_len,
    target_metric_key,
    report_metric_keys,
    repeat_times,
    output_dir,
    verbose,
):
    results = []
    metrics = []
    preamble = f"{id}: {variation:<{longest_variation_len}}"
    outcome = f"{preamble}: "
    metric_keys = set(report_metric_keys + [target_metric_key])
    for i in tqdm(range(repeat_times), desc=preamble, leave=False):
        single_run_metrics = process_run_single(
            id, cmd, variation, output_dir, target_metric_key, metric_keys, verbose
        )
        result = single_run_metrics[target_metric_key]
        if not math.isnan(result):
            metrics.append(single_run_metrics)
            results.append(result)
            outcome += "✓"
        else:
            outcome += "✘"
    outcome = f"\33[2K\r{outcome}"
    if len(metrics) > 0:
        mean_metrics = {k: fmean([x[k] for x in metrics]) for k in metrics[0].keys()}
        mean_target = round(mean_metrics[target_metric_key], 2)
        results_str = f"{outcome} {mean_target}"
        if len(metrics) > 1:
            results_str += f" {tuple(round(x, 2) for x in results)}"
        print(results_str)
        mean_metrics[variation_key] = variation
        return mean_metrics
    else:
        print(outcome)
        return {variation_key: variation, target_metric_key: nan}


def get_versions():
    properties = torch.cuda.get_device_properties(torch.device("cuda"))
    return f"""
Datetime    : {datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")}

Software:
transformers: {transformers.__version__}
torch       : {torch.__version__}
cuda        : {torch.version.cuda}
python      : {platform.python_version()}

Hardware:
{torch.cuda.device_count()} GPUs      : {properties.name}, {properties.total_memory / 2**30:0.2f}GB
"""


def process_results(results, target_metric_key, report_metric_keys, base_variation, output_dir):
    df = pd.DataFrame(results)
    variation_key = "variation"
    diff_key = "diff_%"

    sentinel_value = nan
    if base_variation is not None and len(df[df[variation_key] == base_variation]):
        # this may still return nan
        sentinel_value = df.loc[df[variation_key] == base_variation][target_metric_key].item()
    if math.isnan(sentinel_value):
        # as a fallback, use the minimal value as the sentinel
        sentinel_value = df.loc[df[target_metric_key] != nan][target_metric_key].min()

    # create diff column if possible
    if not math.isnan(sentinel_value):
        df[diff_key] = df.apply(
            lambda r: round(100 * (r[target_metric_key] - sentinel_value) / sentinel_value)
            if not math.isnan(r[target_metric_key])
            else 0,
            axis="columns",
        )

    # re-order columns
    cols = [variation_key, target_metric_key, diff_key, *report_metric_keys]
    df = df.reindex(cols, axis="columns")  # reorder cols

    # capitalize
    df = df.rename(str.capitalize, axis="columns")

    # make the cols as narrow as possible
    df_github = df.rename(lambda c: c.replace("_", "<br>"), axis="columns")
    df_console = df.rename(lambda c: c.replace("_", "\n"), axis="columns")

    report = ["", "Copy between the cut-here-lines and paste as is to github or a forum"]
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results:", df_github.to_markdown(index=False, floatfmt=".2f")]
    report += ["```"]
    report += ["*** Setup:", get_versions()]
    report += ["*** The benchmark command line was:", get_original_command()]
    report += ["```"]
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results (console):", df_console.to_markdown(index=False, floatfmt=".2f")]

    print("\n\n".join(report))


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--base-cmd", default=None, type=str, required=True, help="Base cmd")
    parser.add_argument(
        "--variations",
        default=None,
        type=str,
        nargs="+",
        required=True,
        help="Multi-dimensional variations, example: '|--fp16|--bf16' '|--tf32'",
    )
    parser.add_argument(
        "--base-variation",
        default=None,
        type=str,
        help="Baseline variation to compare to. if None the minimal target value will be used to compare against",
    )
    parser.add_argument(
        "--target-metric-key",
        default=None,
        type=str,
        required=True,
        help="Target metric key in output_dir/all_results.json, e.g., train_samples_per_second",
    )
    parser.add_argument(
        "--report-metric-keys",
        default="",
        type=str,
        help="Report metric keys - other metric keys from output_dir/all_results.json to report, e.g., train_loss. Use a single argument, e.g., 'train_loss train_samples_per_second'",
    )
    parser.add_argument(
        "--repeat-times",
        default=1,
        type=int,
        help="How many times to re-run each variation - an average will be reported",
    )
    parser.add_argument(
        "--output_dir",
        default="output_benchmark",
        type=str,
        help="The output directory where all the benchmark reports will go to and additionally this directory will be used to override --output_dir in the script that is being benchmarked",
    )
    parser.add_argument(
        "--verbose",
        default=False,
        action="store_true",
        help="Whether to show the outputs of each run or just the benchmark progress",
    )
    args = parser.parse_args()

    output_dir = args.output_dir
    Path(output_dir).mkdir(exist_ok=True)
    base_cmd = get_base_command(args, output_dir)

    # split each dimension into its --foo variations
    dims = [list(map(str.strip, re.split(r"\|", x))) for x in args.variations]
    # build a cartesian product of dimensions and convert those back into cmd-line arg strings,
    # while stripping white space for inputs that were empty
    variations = list(map(str.strip, map(" ".join, itertools.product(*dims))))
    longest_variation_len = max(len(x) for x in variations)

    # split wanted keys
    report_metric_keys = args.report_metric_keys.split()

    # capture prints into a log file for convenience
    report_fn = f"benchmark-report-{datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S')}.txt"
    print(f"\nNote: each run's output is also logged under {output_dir}/log.*.std*.txt")
    print(f"and this script's output is also piped into {report_fn}")

    sys.stdout = Tee(report_fn)

    print(f"\n*** Running {len(variations)} benchmarks:")
    print(f"Base command: {' '.join(base_cmd)}")

    variation_key = "variation"
    results = []
    for id, variation in enumerate(tqdm(variations, desc="Total completion: ", leave=False)):
        cmd = base_cmd + variation.split()
        results.append(
            process_run(
                id + 1,
                cmd,
                variation_key,
                variation,
                longest_variation_len,
                args.target_metric_key,
                report_metric_keys,
                args.repeat_times,
                output_dir,
                args.verbose,
            )
        )

    process_results(results, args.target_metric_key, report_metric_keys, args.base_variation, output_dir)
if __name__ == "__main__":
    main()
import unittest

import numpy as np

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin

if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import ChineseCLIPImageProcessor


class ChineseCLIPImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.48145466, 0.4578275, 0.40821073],
        image_std=[0.26862954, 0.26130258, 0.27577711],
        do_convert_rgb=True,
    ):
        size = size if size is not None else {"height": 224, "width": 224}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_convert_rgb = do_convert_rgb

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_convert_rgb": self.do_convert_rgb,
        }

    def prepare_inputs(self, equal_resolution=False, numpify=False, torchify=False):
        """Prepare a list of PIL images, or numpy arrays if numpify=True, or PyTorch tensors if torchify=True."""
        assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time"

        if equal_resolution:
            image_inputs = []
            for i in range(self.batch_size):
                image_inputs.append(
                    np.random.randint(
                        255, size=(self.num_channels, self.max_resolution, self.max_resolution), dtype=np.uint8
                    )
                )
        else:
            image_inputs = []
            for i in range(self.batch_size):
                width, height = np.random.choice(np.arange(self.min_resolution, self.max_resolution), 2)
                image_inputs.append(np.random.randint(255, size=(self.num_channels, width, height), dtype=np.uint8))

        if not numpify and not torchify:
            # PIL expects the channel dimension as last dimension
            image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]

        if torchify:
            image_inputs = [torch.from_numpy(x) for x in image_inputs]

        return image_inputs


@require_torch
@require_vision
class ChineseCLIPImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ChineseCLIPImageProcessingTester(self, do_center_crop=True)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_convert_rgb"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 224, "width": 224})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )


@require_torch
@require_vision
class ChineseCLIPImageProcessingTestFourChannels(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ChineseCLIPImageProcessingTester(self, num_channels=4, do_center_crop=True)
        self.expected_encoded_image_num_channels = 3

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_convert_rgb"))

    def test_batch_feature(self):
        pass

    def test_call_pil_four_channels(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input (four channels in, three channels out after RGB conversion)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.expected_encoded_image_num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.expected_encoded_image_num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
import warnings

from ...utils import logging
from .image_processing_flava import FlavaImageProcessor


logger = logging.get_logger(__name__)


class FlavaFeatureExtractor(FlavaImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use FlavaImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
"""simple docstring"""
from pathlib import Path
import cva
import numpy as np
from matplotlib import pyplot as plt
def _UpperCamelCase ( _A , _A , _A , _A , _A ) -> np.ndarray:
"""simple docstring"""
_UpperCAmelCase = cva.getAffineTransform(_A , _A )
return cva.warpAffine(_A , _A , (rows, cols) )
if __name__ == "__main__":
# read original image
a : List[Any] = cva.imread(
str(Path(__file__).resolve().parent.parent / '''image_data''' / '''lena.jpg''')
)
# turn image in gray scale value
a : Optional[Any] = cva.cvtColor(image, cva.COLOR_BGR2GRAY)
# get image shape
a , a : Dict = gray_img.shape
# set different points to rotate image
a : str = np.array([[5_0, 5_0], [2_0_0, 5_0], [5_0, 2_0_0]], np.floataa)
a : Union[str, Any] = np.array([[1_0, 1_0_0], [2_0_0, 5_0], [1_0_0, 2_5_0]], np.floataa)
a : Union[str, Any] = np.array([[5_0, 5_0], [1_5_0, 5_0], [1_2_0, 2_0_0]], np.floataa)
a : List[str] = np.array([[1_0, 1_0_0], [8_0, 5_0], [1_8_0, 2_5_0]], np.floataa)
# add all rotated images in a list
a : Any = [
gray_img,
get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
]
# plot different image rotations
a : Optional[int] = plt.figure(1)
a : Dict = ['''Original''', '''Rotation 1''', '''Rotation 2''', '''Rotation 3''']
for i, image in enumerate(images):
plt.subplot(2, 2, i + 1), plt.imshow(image, '''gray''')
plt.title(titles[i])
plt.axis('''off''')
plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95)
plt.show()
"""simple docstring"""
from collections.abc import Callable
import numpy as np
def _UpperCamelCase ( _A , _A , _A , _A , _A ) -> np.array:
"""simple docstring"""
_UpperCAmelCase = int(np.ceil((x_end - xa) / step_size ) )
_UpperCAmelCase = np.zeros((n + 1,) )
_UpperCAmelCase = ya
_UpperCAmelCase = xa
for k in range(_A ):
_UpperCAmelCase = y[k] + step_size * ode_func(_A , y[k] )
_UpperCAmelCase = y[k] + (
(step_size / 2) * (ode_func(_A , y[k] ) + ode_func(x + step_size , _A ))
)
x += step_size
return y
if __name__ == "__main__":
    import doctest

    doctest.testmod()
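
# A minimal usage sketch (illustrative, not part of the original module): integrate dy/dx = y
# from x = 0 to 1 with y(0) = 1 and compare against the exact solution e^x.
if __name__ == "__main__":
    approx = euler_modified(lambda x, y: y, 1.0, 0.0, 0.1, 1.0)
    # with step 0.1 the endpoint comes out near 2.714, versus e ~ 2.71828
    print(f"y(1) ~ {approx[-1]:.5f}, exact {np.e:.5f}")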
import inspect
import unittest

from transformers import YolosConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
from ...test_pipeline_mixin import PipelineTesterMixin

if is_torch_available():
    import torch
    from torch import nn

    from transformers import YolosForObjectDetection, YolosModel
    from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST

if is_vision_available():
    from PIL import Image

    from transformers import AutoImageProcessor


class YolosModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=[30, 30],
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        n_targets=8,
        num_detection_tokens=10,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        self.n_targets = n_targets
        self.num_detection_tokens = num_detection_tokens
        # we set the expected sequence length (which is used in several tests)
        # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens
        num_patches = (image_size[1] // patch_size) * (image_size[0] // patch_size)
        self.expected_seq_len = num_patches + 1 + self.num_detection_tokens
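        # Worked example with the defaults above (illustrative): a 30x30 image with 2x2 patches
        # yields (30 // 2) * (30 // 2) = 225 patches, so expected_seq_len = 225 + 1 + 10 = 236.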
def lowercase_ ( self : Any ):
'''simple docstring'''
UpperCAmelCase__ : str = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]] )
UpperCAmelCase__ : Optional[int] = None
if self.use_labels:
# labels is a list of Dict (each Dict being the labels for a given example in the batch)
UpperCAmelCase__ : List[Any] = []
for i in range(self.batch_size ):
UpperCAmelCase__ : str = {}
UpperCAmelCase__ : Optional[Any] = torch.randint(
high=self.num_labels , size=(self.n_targets,) , device=_A )
UpperCAmelCase__ : Union[str, Any] = torch.rand(self.n_targets , 4 , device=_A )
labels.append(_A )
UpperCAmelCase__ : List[Any] = self.get_config()
return config, pixel_values, labels
def lowercase_ ( self : Tuple ):
'''simple docstring'''
return YolosConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_A , initializer_range=self.initializer_range , num_detection_tokens=self.num_detection_tokens , num_labels=self.num_labels , )
def lowercase_ ( self : Union[str, Any] , _A : int , _A : List[str] , _A : int ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = YolosModel(config=_A )
model.to(_A )
model.eval()
UpperCAmelCase__ : List[Any] = model(_A )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.expected_seq_len, self.hidden_size) )
def lowercase_ ( self : List[Any] , _A : Union[str, Any] , _A : Dict , _A : Dict ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = YolosForObjectDetection(_A )
model.to(_A )
model.eval()
UpperCAmelCase__ : Optional[Any] = model(pixel_values=_A )
UpperCAmelCase__ : Dict = model(_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) )
self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) )
UpperCAmelCase__ : Dict = model(pixel_values=_A , labels=_A )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) )
self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) )
def lowercase_ ( self : Any ):
'''simple docstring'''
UpperCAmelCase__ : int = self.prepare_config_and_inputs()
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : str = config_and_inputs
UpperCAmelCase__ : Dict = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class lowerCamelCase_ ( __a , __a , unittest.TestCase ):
lowerCAmelCase__ = (YolosModel, YolosForObjectDetection) if is_torch_available() else ()
lowerCAmelCase__ = (
{'feature-extraction': YolosModel, 'object-detection': YolosForObjectDetection} if is_torch_available() else {}
)
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
    # special case for head models
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "YolosForObjectDetection":
                labels = []
                for i in range(self.model_tester.batch_size):
                    target = {}
                    target["class_labels"] = torch.ones(
                        size=(self.model_tester.n_targets,), device=torch_device, dtype=torch.long
                    )
                    target["boxes"] = torch.ones(
                        self.model_tester.n_targets, 4, device=torch_device, dtype=torch.float
                    )
                    labels.append(target)
                inputs_dict["labels"] = labels

        return inputs_dict
    def setUp(self):
        self.model_tester = YolosModelTester(self)
        self.config_tester = ConfigTester(self, config_class=YolosConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    # YOLOS does not use inputs_embeds
    def test_inputs_embeds(self):
        pass
    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        # in YOLOS, the seq_len is different
        seq_len = self.model_tester.expected_seq_len

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, seq_len, seq_len],
            )
            out_len = len(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            added_hidden_states = 1
            self.assertEqual(out_len + added_hidden_states, len(outputs))

            self_attentions = outputs.attentions
            self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, seq_len, seq_len],
            )
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states
            expected_num_layers = getattr(
                self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
            )
            self.assertEqual(len(hidden_states), expected_num_layers)

            # YOLOS has a different seq_length
            seq_length = self.model_tester.expected_seq_len

            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [seq_length, self.model_tester.hidden_size],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)
    def test_for_object_detection(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_object_detection(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = YolosModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class YolosModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("hustvl/yolos-small") if is_vision_available() else None

    @slow
    def test_inference_object_detection_head(self):
        model = YolosForObjectDetection.from_pretrained("hustvl/yolos-small").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(inputs.pixel_values)

        # verify outputs
        expected_shape = torch.Size((1, 100, 92))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice_logits = torch.tensor(
            [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]],
            device=torch_device,
        )
        expected_slice_boxes = torch.tensor(
            [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]], device=torch_device
        )
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice_logits, atol=1e-4))
        self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3], expected_slice_boxes, atol=1e-4))

        # verify postprocessing
        results = image_processor.post_process_object_detection(
            outputs, threshold=0.3, target_sizes=[image.size[::-1]]
        )[0]
        expected_scores = torch.tensor([0.9994, 0.9790, 0.9964, 0.9972, 0.9861]).to(torch_device)
        expected_labels = [75, 75, 17, 63, 17]
        expected_slice_boxes = torch.tensor([335.0609, 79.3848, 375.4216, 187.2495]).to(torch_device)

        self.assertEqual(len(results["scores"]), 5)
        self.assertTrue(torch.allclose(results["scores"], expected_scores, atol=1e-4))
        self.assertSequenceEqual(results["labels"].tolist(), expected_labels)
        self.assertTrue(torch.allclose(results["boxes"][0, :], expected_slice_boxes))
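# Note (added): in the COCO label space used by this checkpoint, id 75 is
# "remote", 17 is "cat" and 63 is "couch", which is consistent with the
# two-cats-on-a-couch test image loaded by prepare_img() above.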
"""Open the top Google result for a search query in the default web browser."""
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote

import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent

if __name__ == "__main__":
    query = "%20".join(argv[1:]) if len(argv) > 1 else quote(str(input("Search: ")))
    print("Googling.....")
    url = f"https://www.google.com/search?q={query}&num=100"
    res = requests.get(
        url,
        headers={"User-Agent": str(UserAgent().random)},
    )
    try:
        link = (
            BeautifulSoup(res.text, "html.parser")
            .find("div", attrs={"class": "yuRUbf"})
            .find("a")
            .get("href")
        )
    except AttributeError:
        link = parse_qs(
            BeautifulSoup(res.text, "html.parser")
            .find("div", attrs={"class": "kCrYT"})
            .find("a")
            .get("href")
        )["url"][0]
    webbrowser.open(link)
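# Note (added): the div class names ("yuRUbf", "kCrYT") are whatever Google's
# result markup used when this script was written; scraping like this breaks
# whenever the markup changes, which is why the AttributeError fallback exists.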
from collections.abc import Sequence


def max_subsequence_sum(nums: Sequence[int] | None = None) -> int:
    """Return the maximum sum of any (not necessarily contiguous) subsequence."""
    if nums is None or not nums:
        raise ValueError("Input sequence should not be empty")

    ans = nums[0]
    for i in range(1, len(nums)):
        num = nums[i]
        ans = max(ans, ans + num, num)
    return ans
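
# Minimal usage sketch (added): the subsequence need not be contiguous, so
# negative elements are simply skipped when that helps, e.g.
# max_subsequence_sum([3, -5, 4]) == 7 (picking 3 and 4).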
if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # Try on a sample input from the user
    n = int(input("Enter number of elements : ").strip())
    array = list(map(int, input("\nEnter the numbers : ").strip().split()))[:n]
    print(max_subsequence_sum(array))
def sum_of_digits(n: int) -> int:
    n = abs(n)
    res = 0
    while n > 0:
        res += n % 10
        n //= 10
    return res


def sum_of_digits_recursion(n: int) -> int:
    n = abs(n)
    return n if n < 10 else n % 10 + sum_of_digits_recursion(n // 10)


def sum_of_digits_compact(n: int) -> int:
    return sum(int(c) for c in str(abs(n)))


def benchmark() -> None:
    """Benchmark the three implementations on integers of increasing length."""
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        print(f"{call:56} = {func(value)} -- {timing:.4f} seconds")

    for value in (26_2144, 1125_8999_0684_2624, 126_7650_6002_2822_9401_4967_0320_5376):
        for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
            benchmark_a_function(func, value)
        print()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
import os
from typing import BinaryIO, Optional, Union

import numpy as np
import pyarrow.parquet as pq

from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config
from ..features.features import FeatureType, _visit
from ..formatting import query_table
from ..packaged_modules import _PACKAGED_DATASETS_MODULES
from ..packaged_modules.parquet.parquet import Parquet
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader


def get_writer_batch_size(features: Features) -> Optional[int]:
    """Cap the writer batch size so Parquet row groups stay small for media-heavy features."""
    batch_size = np.inf

    def set_batch_size(feature: FeatureType) -> None:
        nonlocal batch_size
        if isinstance(feature, Image):
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS)
        elif isinstance(feature, Audio):
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS)
        elif isinstance(feature, Value) and feature.dtype == "binary":
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS)

    _visit(features, set_batch_size)

    return None if batch_size is np.inf else batch_size
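
# Usage sketch (added, hedged): an Image column caps the batch size at the
# image-specific row-group size, while a dataset of plain int64 columns leaves
# the batch size unlimited (None).
# get_writer_batch_size(Features({"img": Image(), "label": Value("int64")}))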
class ParquetDatasetReader(AbstractDatasetReader):
    def __init__(self, path_or_paths: NestedDataStructureLike[PathLike], split: Optional[NamedSplit] = None, features: Optional[Features] = None, cache_dir: str = None, keep_in_memory: bool = False, streaming: bool = False, num_proc: Optional[int] = None, **kwargs):
        super().__init__(
            path_or_paths, split=split, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming, num_proc=num_proc, **kwargs,
        )
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        hash = _PACKAGED_DATASETS_MODULES["parquet"][1]
        self.builder = Parquet(
            cache_dir=cache_dir, data_files=path_or_paths, features=features, hash=hash, **kwargs,
        )

    def read(self):
        # Build streaming dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(
                download_config=download_config, download_mode=download_mode, verification_mode=verification_mode, base_path=base_path, num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset
class ParquetDatasetWriter:
    def __init__(self, dataset: Dataset, path_or_buf: Union[PathLike, BinaryIO], batch_size: Optional[int] = None, **parquet_writer_kwargs):
        self.dataset = dataset
        self.path_or_buf = path_or_buf
        self.batch_size = batch_size or get_writer_batch_size(dataset.features)
        self.parquet_writer_kwargs = parquet_writer_kwargs

    def write(self) -> int:
        batch_size = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE

        if isinstance(self.path_or_buf, (str, bytes, os.PathLike)):
            with open(self.path_or_buf, "wb+") as buffer:
                written = self._write(file_obj=buffer, batch_size=batch_size, **self.parquet_writer_kwargs)
        else:
            written = self._write(file_obj=self.path_or_buf, batch_size=batch_size, **self.parquet_writer_kwargs)
        return written

    def _write(self, file_obj: BinaryIO, batch_size: int, **parquet_writer_kwargs) -> int:
        written = 0
        _ = parquet_writer_kwargs.pop("path_or_buf", None)
        schema = self.dataset.features.arrow_schema

        writer = pq.ParquetWriter(file_obj, schema=schema, **parquet_writer_kwargs)

        for offset in logging.tqdm(
            range(0, len(self.dataset), batch_size), unit="ba", disable=not logging.is_progress_bar_enabled(), desc="Creating parquet from Arrow format",
        ):
            batch = query_table(
                table=self.dataset._data, key=slice(offset, offset + batch_size), indices=self.dataset._indices if self.dataset._indices is not None else None,
            )
            writer.write_table(batch)
            written += batch.nbytes
        writer.close()
        return written
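
# Usage sketch (added, hedged): writing a small in-memory dataset with the
# writer above; Dataset.from_dict is the standard constructor in `datasets`.
# ds = Dataset.from_dict({"text": ["a", "b"]})
# ParquetDatasetWriter(ds, "out.parquet").write()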
from collections import defaultdict


def dfs(start: int) -> int:
    """Count nodes in the subtree rooted at ``start``; record roots of even-sized subtrees."""
    ret = 1
    visited[start] = True
    for v in tree[start]:
        if v not in visited:
            ret += dfs(v)
    if ret % 2 == 0:
        cuts.append(start)
    return ret


def even_tree() -> None:
    dfs(1)


if __name__ == "__main__":
    n, m = 10, 9
    tree: defaultdict[int, list[int]] = defaultdict(list)
    visited: dict[int, bool] = {}
    cuts: list[int] = []
    count = 0
    edges = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]
    for u, v in edges:
        tree[u].append(v)
        tree[v].append(u)
    even_tree()
    print(len(cuts) - 1)
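# Note (added): dfs(1) roots the traversal at node 1; the root's own subtree is
# the whole (even-sized) tree, so the root always lands in `cuts`, and the number
# of removable edges is therefore len(cuts) - 1.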
import os

from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE, hf_cache_home


default_cache_path = HUGGINGFACE_HUB_CACHE

CONFIG_NAME = "config.json"
WEIGHTS_NAME = "diffusion_pytorch_model.bin"
FLAX_WEIGHTS_NAME = "diffusion_flax_model.msgpack"
ONNX_WEIGHTS_NAME = "model.onnx"
SAFETENSORS_WEIGHTS_NAME = "diffusion_pytorch_model.safetensors"
ONNX_EXTERNAL_WEIGHTS_NAME = "weights.pb"
HUGGINGFACE_CO_RESOLVE_ENDPOINT = "https://huggingface.co"
DIFFUSERS_CACHE = default_cache_path
DIFFUSERS_DYNAMIC_MODULE_NAME = "diffusers_modules"
HF_MODULES_CACHE = os.getenv("HF_MODULES_CACHE", os.path.join(hf_cache_home, "modules"))
DEPRECATED_REVISION_ARGS = ["fp16", "non-ema"]
TEXT_ENCODER_ATTN_MODULE = ".self_attn"
from __future__ import annotations

from dataclasses import dataclass


@dataclass
class TreeNode:
    data: float
    left: TreeNode | None = None
    right: TreeNode | None = None


def is_binary_search_tree(root: TreeNode | None) -> bool:
    """Check whether a binary tree is a valid binary search tree."""

    def is_valid_tree(node: TreeNode | None) -> bool:
        if node is None:
            return True
        if not isinstance(node, TreeNode):
            return False
        try:
            float(node.data)
        except (TypeError, ValueError):
            return False
        return is_valid_tree(node.left) and is_valid_tree(node.right)

    if not is_valid_tree(root):
        raise ValueError(
            "Each node should be type of TreeNode and data should be float."
        )

    def is_binary_search_tree_recursive_check(
        node: TreeNode | None, left_bound: float, right_bound: float
    ) -> bool:
        if node is None:
            return True
        return (
            left_bound < node.data < right_bound
            and is_binary_search_tree_recursive_check(node.left, left_bound, node.data)
            and is_binary_search_tree_recursive_check(
                node.right, node.data, right_bound
            )
        )

    return is_binary_search_tree_recursive_check(root, -float("inf"), float("inf"))
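
# Minimal usage sketch (added): a small valid BST.
# >>> is_binary_search_tree(TreeNode(2.0, TreeNode(1.0), TreeNode(3.0)))
# True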
if __name__ == "__main__":
import doctest
doctest.testmod()
from __future__ import annotations

from collections.abc import Generator


def sieve() -> Generator[int, None, None]:
    """Infinite incremental sieve that yields primes in order."""
    factor_map: dict[int, int] = {}
    prime = 2
    while True:
        factor = factor_map.pop(prime, None)
        if factor:
            x = factor + prime
            while x in factor_map:
                x += factor
            factor_map[x] = factor
        else:
            factor_map[prime * prime] = prime
            yield prime
        prime += 1


def solution(limit: float = 1e10) -> int:
    primes = sieve()
    n = 1
    while True:
        prime = next(primes)
        if (2 * prime * n) > limit:
            return n
        # Ignore the next prime as the remainder will be 2.
        next(primes)
        n += 2
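
# Context (added, hedged): this follows Project Euler 123, where for odd n the
# remainder of (p_n - 1)^n + (p_n + 1)^n mod p_n**2 equals 2 * n * p_n, hence the
# `2 * prime * n > limit` check and the step of 2 over odd indices.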
if __name__ == "__main__":
print(solution())
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/xmod-base": "https://huggingface.co/facebook/xmod-base/resolve/main/config.json",
    "facebook/xmod-large-prenorm": "https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json",
    "facebook/xmod-base-13-125k": "https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json",
    "facebook/xmod-base-30-125k": "https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json",
    "facebook/xmod-base-30-195k": "https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json",
    "facebook/xmod-base-60-125k": "https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json",
    "facebook/xmod-base-60-265k": "https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json",
    "facebook/xmod-base-75-125k": "https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json",
    "facebook/xmod-base-75-269k": "https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json",
}


class XmodConfig(PretrainedConfig):
    model_type = "xmod"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        pre_norm=False,
        adapter_reduction_factor=2,
        adapter_layer_norm=False,
        adapter_reuse_layer_norm=True,
        ln_before_adapter=True,
        languages=("en_XX",),
        default_language=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        self.pre_norm = pre_norm
        self.adapter_reduction_factor = adapter_reduction_factor
        self.adapter_layer_norm = adapter_layer_norm
        self.adapter_reuse_layer_norm = adapter_reuse_layer_norm
        self.ln_before_adapter = ln_before_adapter
        self.languages = list(languages)
        self.default_language = default_language


class XmodOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
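
# Usage sketch (added, hedged): the per-language adapters are keyed by the
# `languages` tuple; everything not passed falls back to the defaults above.
# config = XmodConfig(languages=("en_XX", "de_DE"), default_language="en_XX")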
import argparse
import os
import re
import tensorflow as tf
import torch
from transformers import BertConfig, BertModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def load_tfa_weights_in_bert(model, tf_checkpoint_path, config):
    tf_path = os.path.abspath(tf_checkpoint_path)
    logger.info(f"Converting TensorFlow checkpoint from {tf_path}")
    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_path)
    names = []
    arrays = []
    layer_depth = []
    for full_name, shape in init_vars:
        # logger.info(f"Loading TF weight {name} with shape {shape}")
        name = full_name.split("/")
        if full_name == "_CHECKPOINTABLE_OBJECT_GRAPH" or name[0] in ["global_step", "save_counter"]:
            logger.info(f"Skipping non-model layer {full_name}")
            continue
        if "optimizer" in full_name:
            logger.info(f"Skipping optimization layer {full_name}")
            continue
        if name[0] == "model":
            # ignore initial 'model'
            name = name[1:]
        # figure out how many levels deep the name is
        depth = 0
        for _name in name:
            if _name.startswith("layer_with_weights"):
                depth += 1
            else:
                break
        layer_depth.append(depth)
        # read data
        array = tf.train.load_variable(tf_path, full_name)
        names.append("/".join(name))
        arrays.append(array)
    logger.info(f"Read a total of {len(arrays):,} layers")
    # Sanity check
    if len(set(layer_depth)) != 1:
        raise ValueError(f"Found layer names with different depths (layer depth {list(set(layer_depth))})")
    layer_depth = list(set(layer_depth))[0]
    if layer_depth != 1:
        raise ValueError(
            "The model contains more than just the embedding/encoder layers. This script does not handle MLM/NSP"
            " heads."
        )

    # convert layers
    logger.info("Converting weights...")
    for full_name, array in zip(names, arrays):
        name = full_name.split("/")
        pointer = model
        trace = []
        for i, m_name in enumerate(name):
            if m_name == ".ATTRIBUTES":
                # variable names end with .ATTRIBUTES/VARIABLE_VALUE
                break
            if m_name.startswith("layer_with_weights"):
                layer_num = int(m_name.split("-")[-1])
                if layer_num <= 2:
                    # embedding layers
                    # layer_num 0: word_embeddings
                    # layer_num 1: position_embeddings
                    # layer_num 2: token_type_embeddings
                    continue
                elif layer_num == 3:
                    # embedding LayerNorm
                    trace.extend(["embeddings", "LayerNorm"])
                    pointer = getattr(pointer, "embeddings")
                    pointer = getattr(pointer, "LayerNorm")
                elif layer_num > 3 and layer_num < config.num_hidden_layers + 4:
                    # encoder layers
                    trace.extend(["encoder", "layer", str(layer_num - 4)])
                    pointer = getattr(pointer, "encoder")
                    pointer = getattr(pointer, "layer")
                    pointer = pointer[layer_num - 4]
                elif layer_num == config.num_hidden_layers + 4:
                    # pooler layer
                    trace.extend(["pooler", "dense"])
                    pointer = getattr(pointer, "pooler")
                    pointer = getattr(pointer, "dense")
            elif m_name == "embeddings":
                trace.append("embeddings")
                pointer = getattr(pointer, "embeddings")
                if layer_num == 0:
                    trace.append("word_embeddings")
                    pointer = getattr(pointer, "word_embeddings")
                elif layer_num == 1:
                    trace.append("position_embeddings")
                    pointer = getattr(pointer, "position_embeddings")
                elif layer_num == 2:
                    trace.append("token_type_embeddings")
                    pointer = getattr(pointer, "token_type_embeddings")
                else:
                    raise ValueError(f"Unknown embedding layer with name {full_name}")
                trace.append("weight")
                pointer = getattr(pointer, "weight")
            elif m_name == "_attention_layer":
                # self-attention layer
                trace.extend(["attention", "self"])
                pointer = getattr(pointer, "attention")
                pointer = getattr(pointer, "self")
            elif m_name == "_attention_layer_norm":
                # output attention norm
                trace.extend(["attention", "output", "LayerNorm"])
                pointer = getattr(pointer, "attention")
                pointer = getattr(pointer, "output")
                pointer = getattr(pointer, "LayerNorm")
            elif m_name == "_attention_output_dense":
                # output attention dense
                trace.extend(["attention", "output", "dense"])
                pointer = getattr(pointer, "attention")
                pointer = getattr(pointer, "output")
                pointer = getattr(pointer, "dense")
            elif m_name == "_output_dense":
                # output dense
                trace.extend(["output", "dense"])
                pointer = getattr(pointer, "output")
                pointer = getattr(pointer, "dense")
            elif m_name == "_output_layer_norm":
                # output layer norm
                trace.extend(["output", "LayerNorm"])
                pointer = getattr(pointer, "output")
                pointer = getattr(pointer, "LayerNorm")
            elif m_name == "_key_dense":
                # attention key
                trace.append("key")
                pointer = getattr(pointer, "key")
            elif m_name == "_query_dense":
                # attention query
                trace.append("query")
                pointer = getattr(pointer, "query")
            elif m_name == "_value_dense":
                # attention value
                trace.append("value")
                pointer = getattr(pointer, "value")
            elif m_name == "_intermediate_dense":
                # attention intermediate dense
                trace.extend(["intermediate", "dense"])
                pointer = getattr(pointer, "intermediate")
                pointer = getattr(pointer, "dense")
            # weights & biases
            elif m_name in ["bias", "beta"]:
                trace.append("bias")
                pointer = getattr(pointer, "bias")
            elif m_name in ["kernel", "gamma"]:
                trace.append("weight")
                pointer = getattr(pointer, "weight")
            else:
                logger.warning(f"Ignored {m_name}")
        # for certain layers reshape is necessary
        trace = ".".join(trace)
        if re.match(r"(\S+)\.attention\.self\.(key|value|query)\.(bias|weight)", trace) or re.match(
            r"(\S+)\.attention\.output\.dense\.weight", trace
        ):
            array = array.reshape(pointer.data.shape)
        if "kernel" in full_name:
            array = array.transpose()
        if pointer.shape == array.shape:
            pointer.data = torch.from_numpy(array)
        else:
            raise ValueError(
                f"Shape mismatch in layer {full_name}: Model expects shape {pointer.shape} but layer contains shape:"
                f" {array.shape}"
            )
        logger.info(f"Successfully set variable {full_name} to PyTorch layer {trace}")
    return model
def convert_tfa_checkpoint_to_pytorch(tf_checkpoint_path, config_path, pytorch_dump_path):
    # Instantiate model
    logger.info(f"Loading model based on config from {config_path}...")
    config = BertConfig.from_json_file(config_path)
    model = BertModel(config)

    # Load weights from checkpoint
    logger.info(f"Loading weights from checkpoint {tf_checkpoint_path}...")
    load_tfa_weights_in_bert(model, tf_checkpoint_path, config)

    # Save pytorch-model
    logger.info(f"Saving PyTorch model to {pytorch_dump_path}...")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--tf_checkpoint_path", type=str, required=True, help="Path to the TensorFlow 2.x checkpoint path."
)
parser.add_argument(
"--bert_config_file",
type=str,
required=True,
help="The config json file corresponding to the BERT model. This specifies the model architecture.",
)
parser.add_argument(
"--pytorch_dump_path",
type=str,
required=True,
help="Path to the output PyTorch model (must include filename).",
)
    args = parser.parse_args()
convert_tfa_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
import argparse
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import (
RobertaTokenizer,
TrOCRConfig,
TrOCRForCausalLM,
TrOCRProcessor,
VisionEncoderDecoderModel,
ViTConfig,
ViTImageProcessor,
ViTModel,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(encoder_config, decoder_config):
    rename_keys = []
for i in range(encoder_config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f"""encoder.deit.blocks.{i}.norm1.weight""", f"""encoder.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((f"""encoder.deit.blocks.{i}.norm1.bias""", f"""encoder.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append(
(f"""encoder.deit.blocks.{i}.attn.proj.weight""", f"""encoder.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append(
(f"""encoder.deit.blocks.{i}.attn.proj.bias""", f"""encoder.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append(
(f"""encoder.deit.blocks.{i}.norm2.weight""", f"""encoder.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((f"""encoder.deit.blocks.{i}.norm2.bias""", f"""encoder.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append(
(f"""encoder.deit.blocks.{i}.mlp.fc1.weight""", f"""encoder.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append(
(f"""encoder.deit.blocks.{i}.mlp.fc1.bias""", f"""encoder.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append(
(f"""encoder.deit.blocks.{i}.mlp.fc2.weight""", f"""encoder.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((f"""encoder.deit.blocks.{i}.mlp.fc2.bias""", f"""encoder.encoder.layer.{i}.output.dense.bias""") )
# cls token, position embeddings and patch embeddings of encoder
rename_keys.extend(
[
('''encoder.deit.cls_token''', '''encoder.embeddings.cls_token'''),
('''encoder.deit.pos_embed''', '''encoder.embeddings.position_embeddings'''),
('''encoder.deit.patch_embed.proj.weight''', '''encoder.embeddings.patch_embeddings.projection.weight'''),
('''encoder.deit.patch_embed.proj.bias''', '''encoder.embeddings.patch_embeddings.projection.bias'''),
('''encoder.deit.norm.weight''', '''encoder.layernorm.weight'''),
('''encoder.deit.norm.bias''', '''encoder.layernorm.bias'''),
] )
return rename_keys
def read_in_q_k_v(state_dict, encoder_config):
    for i in range(encoder_config.num_hidden_layers):
        # queries, keys and values (only weights, no biases)
        in_proj_weight = state_dict.pop(f"encoder.deit.blocks.{i}.attn.qkv.weight")

        state_dict[f"encoder.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : encoder_config.hidden_size, :
        ]
        state_dict[f"encoder.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            encoder_config.hidden_size : encoder_config.hidden_size * 2, :
        ]
        state_dict[f"encoder.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -encoder_config.hidden_size :, :
        ]
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def prepare_img(checkpoint_url):
    if "handwritten" in checkpoint_url:
        url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg"  # industry
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg" #
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122.jpg"
    elif "printed" in checkpoint_url or "stage1" in checkpoint_url:
        url = "https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg"
    im = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    return im
@torch.no_grad()
def convert_tr_ocr_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    # define encoder and decoder configs based on checkpoint_url
    encoder_config = ViTConfig(image_size=384, qkv_bias=False)
    decoder_config = TrOCRConfig()

    # size of the architecture
    if "base" in checkpoint_url:
        decoder_config.encoder_hidden_size = 768
    elif "large" in checkpoint_url:
        # use ViT-large encoder
        encoder_config.hidden_size = 1024
        encoder_config.intermediate_size = 4096
        encoder_config.num_hidden_layers = 24
        encoder_config.num_attention_heads = 16
        decoder_config.encoder_hidden_size = 1024
    else:
        raise ValueError("Should either find 'base' or 'large' in checkpoint URL")

    # the large-printed + stage1 checkpoints use sinusoidal position embeddings, no layernorm afterwards
    if "large-printed" in checkpoint_url or "stage1" in checkpoint_url:
        decoder_config.tie_word_embeddings = False
        decoder_config.activation_function = "relu"
        decoder_config.max_position_embeddings = 1024
        decoder_config.scale_embedding = True
        decoder_config.use_learned_position_embeddings = False
        decoder_config.layernorm_embedding = False

    # load HuggingFace model
    encoder = ViTModel(encoder_config, add_pooling_layer=False)
    decoder = TrOCRForCausalLM(decoder_config)
    model = VisionEncoderDecoderModel(encoder=encoder, decoder=decoder)
    model.eval()

    # load state_dict of original model, rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu", check_hash=True)["model"]

    rename_keys = create_rename_keys(encoder_config, decoder_config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, encoder_config)

    # remove parameters we don't need
    del state_dict["encoder.deit.head.weight"]
    del state_dict["encoder.deit.head.bias"]
    del state_dict["decoder.version"]

    # add prefix to decoder keys
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key)
        if key.startswith("decoder") and "output_projection" not in key:
            state_dict["decoder.model." + key] = val
        else:
            state_dict[key] = val

    # load state dict
    model.load_state_dict(state_dict)

    # Check outputs on an image
    image_processor = ViTImageProcessor(size=encoder_config.image_size)
    tokenizer = RobertaTokenizer.from_pretrained("roberta-large")
    processor = TrOCRProcessor(image_processor, tokenizer)

    pixel_values = processor(images=prepare_img(checkpoint_url), return_tensors="pt").pixel_values

    # verify logits
    decoder_input_ids = torch.tensor([[model.config.decoder.decoder_start_token_id]])
    outputs = model(pixel_values=pixel_values, decoder_input_ids=decoder_input_ids)
    logits = outputs.logits

    expected_shape = torch.Size([1, 1, 50265])
    if "trocr-base-handwritten" in checkpoint_url:
        expected_slice = torch.tensor(
            [-1.4502, -4.6683, -0.5347, -2.9291, 9.1435, -3.0571, 8.9764, 1.7560, 8.7358, -1.5311]
        )
    elif "trocr-large-handwritten" in checkpoint_url:
        expected_slice = torch.tensor(
            [-2.6437, -1.3129, -2.2596, -5.3455, 6.3539, 1.7604, 5.4991, 1.4702, 5.6113, 2.0170]
        )
    elif "trocr-base-printed" in checkpoint_url:
        expected_slice = torch.tensor(
            [-5.6816, -5.8388, 1.1398, -6.9034, 6.8505, -2.4393, 1.2284, -1.0232, -1.9661, -3.9210]
        )
    elif "trocr-large-printed" in checkpoint_url:
        expected_slice = torch.tensor(
            [-6.0162, -7.0959, 4.4155, -5.1063, 7.0468, -3.1631, 2.6466, -0.3081, -0.8106, -1.7535]
        )

    if "stage1" not in checkpoint_url:
        assert logits.shape == expected_shape, "Shape of logits not as expected"
        assert torch.allclose(logits[0, 0, :10], expected_slice, atol=1e-3), "First elements of logits not as expected"

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving processor to {pytorch_dump_folder_path}")
    processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_url",
default="https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt",
type=str,
help="URL to the original PyTorch checkpoint (.pth file).",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
    args = parser.parse_args()
convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
from collections import OrderedDict
from typing import List, Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/efficientnet-b7": "https://huggingface.co/google/efficientnet-b7/resolve/main/config.json",
}


class EfficientNetConfig(PretrainedConfig):
    model_type = "efficientnet"

    def __init__(
        self,
        num_channels: int = 3,
        image_size: int = 600,
        width_coefficient: float = 2.0,
        depth_coefficient: float = 3.1,
        depth_divisor: int = 8,
        kernel_sizes: List[int] = [3, 3, 5, 3, 5, 5, 3],
        in_channels: List[int] = [32, 16, 24, 40, 80, 112, 192],
        out_channels: List[int] = [16, 24, 40, 80, 112, 192, 320],
        depthwise_padding: List[int] = [],
        strides: List[int] = [1, 2, 2, 2, 1, 2, 1],
        num_block_repeats: List[int] = [1, 2, 2, 3, 3, 4, 1],
        expand_ratios: List[int] = [1, 6, 6, 6, 6, 6, 6],
        squeeze_expansion_ratio: float = 0.25,
        hidden_act: str = "swish",
        hidden_dim: int = 2560,
        pooling_type: str = "mean",
        initializer_range: float = 0.02,
        batch_norm_eps: float = 0.001,
        batch_norm_momentum: float = 0.99,
        dropout_rate: float = 0.5,
        drop_connect_rate: float = 0.2,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.dropout_rate = dropout_rate
        self.drop_connect_rate = drop_connect_rate
        self.num_hidden_layers = sum(num_block_repeats) * 4


class EfficientNetOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5
"""simple docstring"""
import json
import os
import unittest
from transformers import AutoTokenizer, GPTaTokenizer, GPTaTokenizerFast
from transformers.models.gpta.tokenization_gpta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class GPTaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = GPTaTokenizer
    rust_tokenizer_class = GPTaTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {"add_prefix_space": True}
    test_seq2seq = False
    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
"<|endoftext|>",
]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return GPTaTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return GPTaTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = GPTaTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text, add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)

        sequence = "lower newer"

        # Testing tokenization
        tokens = tokenizer.tokenize(sequence, add_prefix_space=True)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        # Testing conversion to ids without special tokens
        ids = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        # Testing conversion to ids with special tokens
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)
        ids = tokenizer.encode(sequence, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        # Testing the unknown token
        input_tokens = tokens + [rust_tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    def test_pretokenized_inputs(self, *args, **kwargs):
        # It's very difficult to mix/test pretokenization with byte-level
        # And get both GPT2 and Roberta to work at the same time (mostly an issue of adding a space before the string)
        pass
    def test_padding(self, max_length=15):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, s2, max_length=max_length, padding="max_length",
                )

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, p2, max_length=max_length, padding="max_length",
                )
    def test_padding_if_pad_token_set_slow(self):
        tokenizer = GPTaTokenizer.from_pretrained(self.tmpdirname, pad_token="<pad>")

        # Simple input
        s = "This is a simple input"
        s2 = ["This is a simple input looooooooong", "This is a simple input"]
        p = ("This is a simple input", "This is a pair")
        p2 = [
            ("This is a simple input loooooong", "This is a simple input"),
            ("This is a simple pair loooooong", "This is a simple pair"),
        ]

        pad_token_id = tokenizer.pad_token_id

        out_s = tokenizer(s, padding="max_length", max_length=30, return_tensors="np")
        out_s2 = tokenizer(s2, padding=True, truncate=True, return_tensors="np")
        out_p = tokenizer(*p, padding="max_length", max_length=60, return_tensors="np")
        out_p2 = tokenizer(p2, padding=True, truncate=True, return_tensors="np")

        # s
        # test single string max_length padding
        self.assertEqual(out_s["input_ids"].shape[-1], 30)
        self.assertTrue(pad_token_id in out_s["input_ids"])
        self.assertTrue(0 in out_s["attention_mask"])

        # s2
        # test automatic padding
        self.assertEqual(out_s2["input_ids"].shape[-1], 33)
        # long slice doesn't have padding
        self.assertFalse(pad_token_id in out_s2["input_ids"][0])
        self.assertFalse(0 in out_s2["attention_mask"][0])
        # short slice does have padding
        self.assertTrue(pad_token_id in out_s2["input_ids"][1])
        self.assertTrue(0 in out_s2["attention_mask"][1])

        # p
        # test single pair max_length padding
        self.assertEqual(out_p["input_ids"].shape[-1], 60)
        self.assertTrue(pad_token_id in out_p["input_ids"])
        self.assertTrue(0 in out_p["attention_mask"])

        # p2
        # test automatic padding pair
        self.assertEqual(out_p2["input_ids"].shape[-1], 52)
        # long slice pair doesn't have padding
        self.assertFalse(pad_token_id in out_p2["input_ids"][0])
        self.assertFalse(0 in out_p2["attention_mask"][0])
        # short slice pair does have padding
        self.assertTrue(pad_token_id in out_p2["input_ids"][1])
        self.assertTrue(0 in out_p2["attention_mask"][1])
    def test_add_bos_token_slow(self):
        bos_token = "$$$"
        tokenizer = GPTaTokenizer.from_pretrained(self.tmpdirname, bos_token=bos_token, add_bos_token=True)

        s = "This is a simple input"
        s2 = ["This is a simple input 1", "This is a simple input 2"]

        bos_token_id = tokenizer.bos_token_id

        out_s = tokenizer(s)
        out_s2 = tokenizer(s2)

        self.assertEqual(out_s.input_ids[0], bos_token_id)
        self.assertTrue(all(o[0] == bos_token_id for o in out_s2.input_ids))

        decode_s = tokenizer.decode(out_s.input_ids)
        decode_s2 = tokenizer.batch_decode(out_s2.input_ids)

        self.assertEqual(decode_s.split()[0], bos_token)
        self.assertTrue(all(d.split()[0] == bos_token for d in decode_s2))
    # tokenizer has no padding token
    def test_padding_different_model_input_name(self):
        pass
    def test_special_tokens_mask_input_pairs_and_bos_token(self):
        # TODO: change to self.get_tokenizers() when the fast version is implemented
        tokenizers = [self.get_tokenizer(do_lower_case=False, add_bos_token=True)]
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                sequence_0 = "Encode this."
                sequence_1 = "This one too please."
                encoded_sequence = tokenizer.encode(sequence_0, add_special_tokens=False)
                encoded_sequence += tokenizer.encode(sequence_1, add_special_tokens=False)
                encoded_sequence_dict = tokenizer.encode_plus(
                    sequence_0, sequence_1, add_special_tokens=True, return_special_tokens_mask=True,
                )
                encoded_sequence_w_special = encoded_sequence_dict["input_ids"]
                special_tokens_mask = encoded_sequence_dict["special_tokens_mask"]
                self.assertEqual(len(special_tokens_mask), len(encoded_sequence_w_special))

                filtered_sequence = [
                    (x if not special_tokens_mask[i] else None) for i, x in enumerate(encoded_sequence_w_special)
                ]
                filtered_sequence = [x for x in filtered_sequence if x is not None]
                self.assertEqual(encoded_sequence, filtered_sequence)
@require_tokenizers
class OPTTokenizationTest(unittest.TestCase):
    def test_serialize_deserialize_fast_opt(self):
        # More context:
        # https://huggingface.co/wjmcat/opt-350m-paddle/discussions/1
        # https://huggingface.slack.com/archives/C01N44FJDHT/p1653511495183519
        # https://github.com/huggingface/transformers/pull/17088#discussion_r871246439
        tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m", from_slow=True)
        text = "A photo of a cat"
        input_ids = tokenizer.encode(text)
        self.assertEqual(input_ids, [2, 250, 1345, 9, 10, 4758])
        tokenizer.save_pretrained("test_opt")

        tokenizer = AutoTokenizer.from_pretrained("./test_opt")
        input_ids = tokenizer.encode(text)
        self.assertEqual(input_ids, [2, 250, 1345, 9, 10, 4758])

    def test_fast_slow_equivalence(self):
        tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m", use_slow=True)
        text = "A photo of a cat"
        input_ids = tokenizer.encode(text)
        # Same as above
        self.assertEqual(input_ids, [2, 250, 1345, 9, 10, 4758])

    @unittest.skip("This test is failing because of a bug in the fast tokenizer")
    def test_users_can_modify_bos(self):
        tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m", from_slow=True)
        tokenizer.bos_token = "bos"
        tokenizer.bos_token_id = tokenizer.get_vocab()["bos"]

        text = "A photo of a cat"
        input_ids = tokenizer.encode(text)
        # We changed the bos token
        self.assertEqual(input_ids, [31957, 250, 1345, 9, 10, 4758])
        tokenizer.save_pretrained("./tok")
        tokenizer = AutoTokenizer.from_pretrained("./tok")
        self.assertTrue(tokenizer.is_fast)
        input_ids = tokenizer.encode(text)
        self.assertEqual(input_ids, [31957, 250, 1345, 9, 10, 4758])
import argparse
import gc
import json
import os
import shutil
import warnings
import torch
from transformers import LlamaConfig, LlamaForCausalLM, LlamaTokenizer
try:
    from transformers import LlamaTokenizerFast
except ImportError as e:
    warnings.warn(e)
    warnings.warn(
        "The converted tokenizer will be the `slow` tokenizer. To use the fast, update your `tokenizers` library and re-run the tokenizer conversion"
    )
    LlamaTokenizerFast = None
INTERMEDIATE_SIZE_MAP = {
"7B": 11008,
"13B": 13824,
"30B": 17920,
"65B": 22016,
"70B": 28672,
}
NUM_SHARDS = {
"7B": 1,
"7Bf": 1,
"13B": 2,
"13Bf": 2,
"30B": 4,
"65B": 8,
"70B": 8,
"70Bf": 8,
}
def compute_intermediate_size(n, ffn_dim_multiplier=1, multiple_of=256):
    return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3)) + multiple_of - 1) // multiple_of)
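
# Worked example (added): with n = 4096 (the 7B hidden dim), multiplier 1 and
# multiple_of 256, int(8 * 4096 / 3) = 10922 rounds up to 11008 -- the 7B value
# in INTERMEDIATE_SIZE_MAP above.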
def read_json(path):
    with open(path, "r") as f:
        return json.load(f)


def write_json(text, path):
    with open(path, "w") as f:
        json.dump(text, f)
def write_model(model_path, input_base_path, model_size, safe_serialization=True):
    os.makedirs(model_path, exist_ok=True)
    tmp_model_path = os.path.join(model_path, "tmp")
    os.makedirs(tmp_model_path, exist_ok=True)

    params = read_json(os.path.join(input_base_path, "params.json"))
    num_shards = NUM_SHARDS[model_size]
    n_layers = params["n_layers"]
    n_heads = params["n_heads"]
    n_heads_per_shard = n_heads // num_shards
    dim = params["dim"]
    dims_per_head = dim // n_heads
    base = 10000.0
    inv_freq = 1.0 / (base ** (torch.arange(0, dims_per_head, 2).float() / dims_per_head))

    if "n_kv_heads" in params:
        num_key_value_heads = params["n_kv_heads"]  # for GQA / MQA
        num_local_key_value_heads = n_heads_per_shard // num_key_value_heads
        key_value_dim = dim // num_key_value_heads
    else:  # compatibility with other checkpoints
        num_key_value_heads = n_heads
        num_local_key_value_heads = n_heads_per_shard
        key_value_dim = dim

    # permute for sliced rotary
    def permute(w, n_heads=n_heads, dim1=dim, dim2=dim):
        return w.view(n_heads, dim1 // n_heads // 2, 2, dim2).transpose(1, 2).reshape(dim1, dim2)
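
    # Note (added, hedged): `permute` reorders each head's rows so that Meta's
    # interleaved rotary pairs become the half-split layout the Hugging Face
    # Llama implementation expects, by viewing each head as (pairs, 2) and
    # swapping those axes before flattening back.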
    print(f"Fetching all parameters from the checkpoint at {input_base_path}.")
    # Load weights
    if model_size == "7B":
        # Not sharded
        # (The sharded implementation would also work, but this is simpler.)
        loaded = torch.load(os.path.join(input_base_path, "consolidated.00.pth"), map_location="cpu")
    else:
        # Sharded
        loaded = [
            torch.load(os.path.join(input_base_path, f"consolidated.{i:02d}.pth"), map_location="cpu")
            for i in range(num_shards)
        ]
    param_count = 0
    index_dict = {"weight_map": {}}
    for layer_i in range(n_layers):
        filename = f"pytorch_model-{layer_i + 1}-of-{n_layers + 1}.bin"
if model_size == "7B":
# Unsharded
            state_dict = {
f'model.layers.{layer_i}.self_attn.q_proj.weight': permute(
loaded[f'layers.{layer_i}.attention.wq.weight'] ),
f'model.layers.{layer_i}.self_attn.k_proj.weight': permute(
loaded[f'layers.{layer_i}.attention.wk.weight'] ),
f'model.layers.{layer_i}.self_attn.v_proj.weight': loaded[f'layers.{layer_i}.attention.wv.weight'],
f'model.layers.{layer_i}.self_attn.o_proj.weight': loaded[f'layers.{layer_i}.attention.wo.weight'],
f'model.layers.{layer_i}.mlp.gate_proj.weight': loaded[f'layers.{layer_i}.feed_forward.w1.weight'],
f'model.layers.{layer_i}.mlp.down_proj.weight': loaded[f'layers.{layer_i}.feed_forward.w2.weight'],
f'model.layers.{layer_i}.mlp.up_proj.weight': loaded[f'layers.{layer_i}.feed_forward.w3.weight'],
f'model.layers.{layer_i}.input_layernorm.weight': loaded[f'layers.{layer_i}.attention_norm.weight'],
f'model.layers.{layer_i}.post_attention_layernorm.weight': loaded[f'layers.{layer_i}.ffn_norm.weight'],
}
else:
# Sharded
# Note that attention.w{q,k,v,o}, feed_fordward.w[1,2,3], attention_norm.weight and ffn_norm.weight share
# the same storage object, saving attention_norm and ffn_norm will save other weights too, which is
# redundant as other weights will be stitched from multiple shards. To avoid that, they are cloned.
            state_dict = {
f'model.layers.{layer_i}.input_layernorm.weight': loaded[0][
f'layers.{layer_i}.attention_norm.weight'
].clone(),
f'model.layers.{layer_i}.post_attention_layernorm.weight': loaded[0][
f'layers.{layer_i}.ffn_norm.weight'
].clone(),
}
            state_dict[f"model.layers.{layer_i}.self_attn.q_proj.weight"] = permute(
                torch.cat(
                    [
                        loaded[i][f"layers.{layer_i}.attention.wq.weight"].view(n_heads_per_shard, dims_per_head, dim)
                        for i in range(num_shards)
                    ],
                    dim=0,
                ).reshape(dim, dim)
            )
A__ = permute(
torch.cat(
[
loaded[i][f'layers.{layer_i}.attention.wk.weight'].view(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
for i in range(SCREAMING_SNAKE_CASE__ )
] , dim=0 , ).reshape(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , )
A__ = torch.cat(
[
loaded[i][f'layers.{layer_i}.attention.wv.weight'].view(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
for i in range(SCREAMING_SNAKE_CASE__ )
] , dim=0 , ).reshape(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
A__ = torch.cat(
[loaded[i][f'layers.{layer_i}.attention.wo.weight'] for i in range(SCREAMING_SNAKE_CASE__ )] , dim=1 )
A__ = torch.cat(
[loaded[i][f'layers.{layer_i}.feed_forward.w1.weight'] for i in range(SCREAMING_SNAKE_CASE__ )] , dim=0 )
A__ = torch.cat(
[loaded[i][f'layers.{layer_i}.feed_forward.w2.weight'] for i in range(SCREAMING_SNAKE_CASE__ )] , dim=1 )
A__ = torch.cat(
[loaded[i][f'layers.{layer_i}.feed_forward.w3.weight'] for i in range(SCREAMING_SNAKE_CASE__ )] , dim=0 )
A__ = inv_freq
for k, v in state_dict.items():
A__ = filename
param_count += v.numel()
torch.save(SCREAMING_SNAKE_CASE__ , os.path.join(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
A__ = f'pytorch_model-{n_layers + 1}-of-{n_layers + 1}.bin'
if model_size == "7B":
# Unsharded
A__ = {
'model.embed_tokens.weight': loaded['tok_embeddings.weight'],
'model.norm.weight': loaded['norm.weight'],
'lm_head.weight': loaded['output.weight'],
}
else:
A__ = {
'model.norm.weight': loaded[0]['norm.weight'],
'model.embed_tokens.weight': torch.cat(
[loaded[i]['tok_embeddings.weight'] for i in range(SCREAMING_SNAKE_CASE__ )] , dim=1 ),
'lm_head.weight': torch.cat([loaded[i]['output.weight'] for i in range(SCREAMING_SNAKE_CASE__ )] , dim=0 ),
}
for k, v in state_dict.items():
A__ = filename
param_count += v.numel()
torch.save(SCREAMING_SNAKE_CASE__ , os.path.join(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
# Write configs
A__ = {'total_size': param_count * 2}
write_json(SCREAMING_SNAKE_CASE__ , os.path.join(SCREAMING_SNAKE_CASE__ , 'pytorch_model.bin.index.json' ) )
A__ = params['ffn_dim_multiplier'] if 'ffn_dim_multiplier' in params else 1
A__ = params['multiple_of'] if 'multiple_of' in params else 256
A__ = LlamaConfig(
hidden_size=SCREAMING_SNAKE_CASE__ , intermediate_size=compute_intermediate_size(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) , num_attention_heads=params['n_heads'] , num_hidden_layers=params['n_layers'] , rms_norm_eps=params['norm_eps'] , num_key_value_heads=SCREAMING_SNAKE_CASE__ , )
config.save_pretrained(SCREAMING_SNAKE_CASE__ )
# Make space so we can load the model properly now.
del state_dict
del loaded
gc.collect()
print('Loading the checkpoint in a Llama model.' )
A__ = LlamaForCausalLM.from_pretrained(SCREAMING_SNAKE_CASE__ , torch_dtype=torch.floataa , low_cpu_mem_usage=SCREAMING_SNAKE_CASE__ )
# Avoid saving this as part of the config.
del model.config._name_or_path
print('Saving in the Transformers format.' )
model.save_pretrained(SCREAMING_SNAKE_CASE__ , safe_serialization=SCREAMING_SNAKE_CASE__ )
shutil.rmtree(SCREAMING_SNAKE_CASE__ )
def _snake_case( SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Optional[int] ) -> str:
'''simple docstring'''
A__ = LlamaTokenizer if LlamaTokenizerFast is None else LlamaTokenizerFast
print(f'Saving a {tokenizer_class.__name__} to {tokenizer_path}.' )
A__ = tokenizer_class(SCREAMING_SNAKE_CASE__ )
tokenizer.save_pretrained(SCREAMING_SNAKE_CASE__ )
def _snake_case( ) -> Dict:
'''simple docstring'''
A__ = argparse.ArgumentParser()
parser.add_argument(
'--input_dir' , help='Location of LLaMA weights, which contains tokenizer.model and model folders' , )
parser.add_argument(
'--model_size' , choices=['7B', '7Bf', '13B', '13Bf', '30B', '65B', '70B', '70Bf', 'tokenizer_only'] , )
parser.add_argument(
'--output_dir' , help='Location to write HF model and tokenizer' , )
parser.add_argument('--safe_serialization' , type=SCREAMING_SNAKE_CASE__ , help='Whether or not to save using `safetensors`.' )
A__ = parser.parse_args()
if args.model_size != "tokenizer_only":
write_model(
model_path=args.output_dir , input_base_path=os.path.join(args.input_dir , args.model_size ) , model_size=args.model_size , safe_serialization=args.safe_serialization , )
A__ = os.path.join(args.input_dir , 'tokenizer.model' )
write_tokenizer(args.output_dir , SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
main()
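# --- Illustrative sketch (added for clarity, not part of the original script) ---
# The `permute` helper above is the subtle step of the conversion: Meta's
# checkpoints store rotary query/key weights with the per-head rotary pairs
# interleaved, while the Hugging Face Llama attention expects the two halves
# of each head split apart. The toy example below uses hypothetical 2-head,
# 8-dim weights and shows that rows [0, 1, 2, 3] of a head come out as
# [0, 2, 1, 3].
import torch


def permute_demo(w, n_heads=2, dim1=8, dim2=8):
    # Same reshape/transpose trick as `permute` in the conversion script.
    return w.view(n_heads, dim1 // n_heads // 2, 2, dim2).transpose(1, 2).reshape(dim1, dim2)


w = torch.arange(64, dtype=torch.float32).reshape(8, 8)
# First head (rows 0-3) comes out in row order 0, 2, 1, 3:
assert torch.equal(permute_demo(w)[:4, 0], torch.tensor([0.0, 16.0, 8.0, 24.0]))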
from math import log2


def get_index_of_rightmost_set_bit(number: int) -> int:
    """Return the 0-based index of the rightmost set bit of ``number``.

    >>> get_index_of_rightmost_set_bit(0)
    0
    >>> get_index_of_rightmost_set_bit(12)
    2
    """
    if not isinstance(number, int):
        raise TypeError("Input value must be an 'int' type")
    if number < 0:
        raise ValueError("Input value must be a positive integer")
    return 0 if number == 0 else int(log2(number & -number))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
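# A short note (added for clarity, not in the original file): ``number & -number``
# isolates the rightmost set bit because, in two's complement, negating a number
# flips every bit above its lowest set bit. For example, with number = 12 = 0b1100:
# -12 ends in 0b0100, so 12 & -12 == 4 == 0b100, and int(log2(4)) == 2 is the
# 0-based index of that bit.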
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class ShapEPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ShapEPipeline
    params = ["prompt"]
    batch_params = ["prompt"]
    required_optional_params = [
        "num_images_per_prompt",
        "num_inference_steps",
        "generator",
        "latents",
        "guidance_scale",
        "frame_size",
        "output_type",
        "return_dict",
    ]
    test_gpu_offload = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def renderer_dim(self):
        return 8

    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=self.text_embedder_hidden_size,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModelWithProjection(config)

    @property
    def dummy_prior(self):
        torch.manual_seed(0)
        model_kwargs = {
            "num_attention_heads": 2,
            "attention_head_dim": 16,
            "embedding_dim": self.time_input_dim,
            "num_embeddings": 32,
            "embedding_proj_dim": self.text_embedder_hidden_size,
            "time_embed_dim": self.time_embed_dim,
            "num_layers": 1,
            "clip_embed_dim": self.time_input_dim * 2,
            "additional_embeddings": 0,
            "time_embed_act_fn": "gelu",
            "norm_in_type": "layer",
            "encoder_hid_proj_type": None,
            "added_emb_type": None,
        }
        model = PriorTransformer(**model_kwargs)
        return model

    @property
    def dummy_renderer(self):
        torch.manual_seed(0)
        model_kwargs = {
            "param_shapes": (
                (self.renderer_dim, 93),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
            ),
            "d_latent": self.time_input_dim,
            "d_hidden": self.renderer_dim,
            "n_output": 12,
            "background": (
                0.1,
                0.1,
                0.1,
            ),
        }
        model = ShapERenderer(**model_kwargs)
        return model

    def get_dummy_components(self):
        prior = self.dummy_prior
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        renderer = self.dummy_renderer

        scheduler = HeunDiscreteScheduler(
            beta_schedule="exp",
            num_train_timesteps=1024,
            prediction_type="sample",
            use_karras_sigmas=True,
            clip_sample=True,
            clip_sample_range=1.0,
        )
        components = {
            "prior": prior,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "renderer": renderer,
            "scheduler": scheduler,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "generator": generator,
            "num_inference_steps": 1,
            "frame_size": 32,
            "output_type": "np",
        }
        return inputs

    def test_shap_e(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images[0]
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (20, 32, 32, 3)
        expected_slice = np.array(
            [
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
            ]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_batch_consistent(self):
        # NOTE: larger batch sizes make this test very slow, so only small ones are checked
        self._test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True
        self._test_inference_batch_single_identical(
            batch_size=2,
            test_max_difference=test_max_difference,
            relax_max_difference=relax_max_difference,
        )

    def test_num_images_per_prompt(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        batch_size = 1
        num_images_per_prompt = 2
        inputs = self.get_dummy_inputs(torch_device)

        for key in inputs.keys():
            if key in self.batch_params:
                inputs[key] = batch_size * [inputs[key]]

        images = pipe(**inputs, num_images_per_prompt=num_images_per_prompt)[0]

        assert images.shape[0] == batch_size * num_images_per_prompt


@slow
@require_torch_gpu
class ShapEPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_shap_e(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/shap_e/test_shap_e_np_out.npy"
        )
        pipe = ShapEPipeline.from_pretrained("openai/shap-e")
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=torch_device).manual_seed(0)

        images = pipe(
            "a shark",
            generator=generator,
            guidance_scale=15.0,
            num_inference_steps=64,
            frame_size=64,
            output_type="np",
        ).images[0]

        assert images.shape == (20, 64, 64, 3)

        assert_mean_pixel_difference(images, expected_image)
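# --- Usage sketch (added for illustration; "openai/shap-e" is the checkpoint
# exercised by the slow test above, and `export_to_gif` is a diffusers utility) ---
#
#     import torch
#     from diffusers import ShapEPipeline
#     from diffusers.utils import export_to_gif
#
#     pipe = ShapEPipeline.from_pretrained("openai/shap-e").to("cuda")
#     images = pipe("a shark", guidance_scale=15.0, num_inference_steps=64, frame_size=64).images
#     export_to_gif(images[0], "shark_3d.gif")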
from typing import Optional, Tuple, Union
import torch
from einops import rearrange, reduce
from diffusers import DDIMScheduler, DDPMScheduler, DiffusionPipeline, ImagePipelineOutput, UNet2DConditionModel
from diffusers.schedulers.scheduling_ddim import DDIMSchedulerOutput
from diffusers.schedulers.scheduling_ddpm import DDPMSchedulerOutput

BITS = 8


# bit encoding/decoding helpers in the style of lucidrains' bit-diffusion
def decimal_to_bits(x, bits=BITS):
    """Expects an image tensor in [0, 1], returns a bit tensor in {-1, 1}."""
    device = x.device

    x = (x * 255).int().clamp(0, 255)

    mask = 2 ** torch.arange(bits - 1, -1, -1, device=device)
    mask = rearrange(mask, "d -> d 1 1")
    x = rearrange(x, "b c h w -> b c 1 h w")

    bits = ((x & mask) != 0).float()
    bits = rearrange(bits, "b c d h w -> b (c d) h w")
    bits = bits * 2 - 1
    return bits


def bits_to_decimal(x, bits=BITS):
    """Expects a bit tensor in {-1, 1}, returns an image tensor in [0, 1]."""
    device = x.device

    x = (x > 0).int()
    mask = 2 ** torch.arange(bits - 1, -1, -1, device=device, dtype=torch.int32)

    mask = rearrange(mask, "d -> d 1 1")
    x = rearrange(x, "b (c d) h w -> b c d h w", d=8)
    dec = reduce(x * mask, "b c d h w -> b c h w", "sum")
    return (dec / 255).clamp(0.0, 1.0)


# modified DDIM step that clamps the predicted x_0 between -bit_scale and +bit_scale
def ddim_bit_scheduler_step(
    self,
    model_output: torch.FloatTensor,
    timestep: int,
    sample: torch.FloatTensor,
    eta: float = 0.0,
    use_clipped_model_output: bool = True,
    generator=None,
    return_dict: bool = True,
) -> Union[DDIMSchedulerOutput, Tuple]:
    if self.num_inference_steps is None:
        raise ValueError(
            "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
        )
    # See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf
    # Notation (<variable name> -> <name in paper>)
    # - pred_noise_t -> e_theta(x_t, t)
    # - pred_original_sample -> f_theta(x_t, t) or x_0
    # - std_dev_t -> sigma_t
    # - eta -> η
    # - pred_sample_direction -> "direction pointing to x_t"
    # - pred_prev_sample -> "x_t-1"

    # 1. get previous step value (=t-1)
    prev_timestep = timestep - self.config.num_train_timesteps // self.num_inference_steps

    # 2. compute alphas, betas
    alpha_prod_t = self.alphas_cumprod[timestep]
    alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod

    beta_prod_t = 1 - alpha_prod_t

    # 3. compute predicted original sample from predicted noise also called
    # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5

    # 4. Clip "predicted x_0"
    scale = self.bit_scale
    if self.config.clip_sample:
        pred_original_sample = torch.clamp(pred_original_sample, -scale, scale)

    # 5. compute variance: "sigma_t(η)" -> see formula (16)
    # σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1)
    variance = self._get_variance(timestep, prev_timestep)
    std_dev_t = eta * variance ** 0.5

    if use_clipped_model_output:
        # the model_output is always re-derived from the clipped x_0 in Glide
        model_output = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5

    # 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    pred_sample_direction = (1 - alpha_prod_t_prev - std_dev_t**2) ** 0.5 * model_output

    # 7. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    prev_sample = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction

    if eta > 0:
        # randn_like does not support generator https://github.com/pytorch/pytorch/issues/27072
        device = model_output.device if torch.is_tensor(model_output) else "cpu"
        noise = torch.randn(model_output.shape, dtype=model_output.dtype, generator=generator).to(device)
        variance = self._get_variance(timestep, prev_timestep) ** 0.5 * eta * noise

        prev_sample = prev_sample + variance

    if not return_dict:
        return (prev_sample,)

    return DDIMSchedulerOutput(prev_sample=prev_sample, pred_original_sample=pred_original_sample)


# modified DDPM step with the same x_0 clamping
def ddpm_bit_scheduler_step(
    self,
    model_output: torch.FloatTensor,
    timestep: int,
    sample: torch.FloatTensor,
    prediction_type="epsilon",
    generator=None,
    return_dict: bool = True,
) -> Union[DDPMSchedulerOutput, Tuple]:
    t = timestep

    if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]:
        model_output, predicted_variance = torch.split(model_output, sample.shape[1], dim=1)
    else:
        predicted_variance = None

    # 1. compute alphas, betas
    alpha_prod_t = self.alphas_cumprod[t]
    alpha_prod_t_prev = self.alphas_cumprod[t - 1] if t > 0 else self.one
    beta_prod_t = 1 - alpha_prod_t
    beta_prod_t_prev = 1 - alpha_prod_t_prev

    # 2. compute predicted original sample from predicted noise also called
    # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
    if prediction_type == "epsilon":
        pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
    elif prediction_type == "sample":
        pred_original_sample = model_output
    else:
        raise ValueError(f"Unsupported prediction_type {prediction_type}.")

    # 3. Clip "predicted x_0"
    scale = self.bit_scale
    if self.config.clip_sample:
        pred_original_sample = torch.clamp(pred_original_sample, -scale, scale)

    # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
    # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
    pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * self.betas[t]) / beta_prod_t
    current_sample_coeff = self.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t

    # 5. Compute predicted previous sample µ_t
    # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
    pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample

    # 6. Add noise
    variance = 0
    if t > 0:
        noise = torch.randn(
            model_output.size(), dtype=model_output.dtype, layout=model_output.layout, generator=generator
        ).to(model_output.device)
        variance = (self._get_variance(t, predicted_variance=predicted_variance) ** 0.5) * noise

    pred_prev_sample = pred_prev_sample + variance

    if not return_dict:
        return (pred_prev_sample,)

    return DDPMSchedulerOutput(prev_sample=pred_prev_sample, pred_original_sample=pred_original_sample)


class BitDiffusion(DiffusionPipeline):
    def __init__(
        self,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, DDPMScheduler],
        bit_scale: Optional[float] = 1.0,
    ):
        super().__init__()
        self.bit_scale = bit_scale
        # patch the scheduler with the bit-aware step function
        scheduler.step = ddim_bit_scheduler_step if isinstance(scheduler, DDIMScheduler) else ddpm_bit_scheduler_step

        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        height: Optional[int] = 256,
        width: Optional[int] = 256,
        num_inference_steps: Optional[int] = 50,
        generator: Optional[torch.Generator] = None,
        batch_size: Optional[int] = 1,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[Tuple, ImagePipelineOutput]:
        latents = torch.randn(
            (batch_size, self.unet.config.in_channels, height, width),
            generator=generator,
        )
        latents = decimal_to_bits(latents) * self.bit_scale
        latents = latents.to(self.device)

        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # predict the noise residual
            noise_pred = self.unet(latents, t).sample

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents).prev_sample

        image = bits_to_decimal(latents)

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
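# --- Illustrative sketch (added for clarity, not part of the original pipeline) ---
# `decimal_to_bits` / `bits_to_decimal` should round-trip up to the implicit
# 8-bit quantization of the input image. A minimal self-contained check:
if __name__ == "__main__":
    x = torch.rand(1, 3, 4, 4)  # a fake image in [0, 1]
    bits = decimal_to_bits(x)  # values in {-1, +1}; channel dim expands 3 -> 24
    assert bits.shape == (1, 24, 4, 4)
    recovered = bits_to_decimal(bits)
    # Round-trip error is bounded by the 1/255 quantization step.
    assert (recovered - x).abs().max() <= 1 / 255 + 1e-6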
from __future__ import annotations
import unittest
from transformers import FunnelConfig, is_tf_available
from transformers.testing_utils import require_tf
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
    import tensorflow as tf

    from transformers import (
        TFFunnelBaseModel,
        TFFunnelForMaskedLM,
        TFFunnelForMultipleChoice,
        TFFunnelForPreTraining,
        TFFunnelForQuestionAnswering,
        TFFunnelForSequenceClassification,
        TFFunnelForTokenClassification,
        TFFunnelModel,
    )
class TFFunnelModelTester:
    """You can also import this e.g. from .test_modeling_funnel import FunnelModelTester"""

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        block_sizes=[1, 1, 2],
        num_decoder_layers=1,
        d_model=32,
        n_head=4,
        d_head=8,
        d_inner=37,
        hidden_act="gelu_new",
        hidden_dropout=0.1,
        attention_dropout=0.1,
        activation_dropout=0.0,
        max_position_embeddings=512,
        type_vocab_size=3,
        initializer_std=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
        base=False,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.block_sizes = block_sizes
        self.num_decoder_layers = num_decoder_layers
        self.d_model = d_model
        self.n_head = n_head
        self.d_head = d_head
        self.d_inner = d_inner
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = 2
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.initializer_std = initializer_std

        # Used in the tests to check the size of the first attention layer
        self.num_attention_heads = n_head
        # Used in the tests to check the size of the first hidden state
        self.hidden_size = self.d_model
        # Used in the tests to check the number of output hidden states/attentions
        self.num_hidden_layers = sum(self.block_sizes) + (0 if base else self.num_decoder_layers)
        # FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with
        # the last hidden state of the first block (which is the first hidden state of the decoder).
        if not base:
            self.expected_num_hidden_layers = self.num_hidden_layers + 2

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = FunnelConfig(
            vocab_size=self.vocab_size,
            block_sizes=self.block_sizes,
            num_decoder_layers=self.num_decoder_layers,
            d_model=self.d_model,
            n_head=self.n_head,
            d_head=self.d_head,
            d_inner=self.d_inner,
            hidden_act=self.hidden_act,
            hidden_dropout=self.hidden_dropout,
            attention_dropout=self.attention_dropout,
            activation_dropout=self.activation_dropout,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_std=self.initializer_std,
        )

        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFFunnelModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.d_model))

        config.truncate_seq = False
        model = TFFunnelModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.d_model))

        config.separate_cls = False
        model = TFFunnelModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.d_model))

    def create_and_check_base_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFFunnelBaseModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, 2, self.d_model))

        config.truncate_seq = False
        model = TFFunnelBaseModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, 3, self.d_model))

        config.separate_cls = False
        model = TFFunnelBaseModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, 2, self.d_model))

    def create_and_check_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFFunnelForPreTraining(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFFunnelForMaskedLM(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFFunnelForSequenceClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = TFFunnelForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFFunnelForTokenClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFFunnelForQuestionAnswering(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_tf
class TFFunnelModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFFunnelModel,
            TFFunnelForMaskedLM,
            TFFunnelForPreTraining,
            TFFunnelForQuestionAnswering,
            TFFunnelForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": (TFFunnelBaseModel, TFFunnelModel),
            "fill-mask": TFFunnelForMaskedLM,
            "question-answering": TFFunnelForQuestionAnswering,
            "text-classification": TFFunnelForSequenceClassification,
            "token-classification": TFFunnelForTokenClassification,
            "zero-shot": TFFunnelForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFFunnelModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FunnelConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)


@require_tf
class TFFunnelBaseModelTest(TFModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else ()
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFFunnelModelTester(self, base=True)
        self.config_tester = ConfigTester(self, config_class=FunnelConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_base_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_base_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class OwlViTProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "OwlViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, query_images=None, padding="max_length", return_tensors="np", **kwargs):
        if text is None and query_images is None and images is None:
            raise ValueError(
                "You have to specify at least one text or query image or image. All three cannot be none."
            )

        if text is not None:
            if isinstance(text, str) or (isinstance(text, List) and not isinstance(text[0], List)):
                encodings = [self.tokenizer(text, padding=padding, return_tensors=return_tensors, **kwargs)]

            elif isinstance(text, List) and isinstance(text[0], List):
                encodings = []

                # Maximum number of queries across batch
                max_num_queries = max([len(t) for t in text])

                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(t) != max_num_queries:
                        t = t + [" "] * (max_num_queries - len(t))

                    encoding = self.tokenizer(t, padding=padding, return_tensors=return_tensors, **kwargs)
                    encodings.append(encoding)
            else:
                raise TypeError("Input text should be a string, a list of strings or a nested list of strings")

            if return_tensors == "np":
                input_ids = np.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = np.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)

            elif return_tensors == "jax" and is_flax_available():
                import jax.numpy as jnp

                input_ids = jnp.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = jnp.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)

            elif return_tensors == "pt" and is_torch_available():
                import torch

                input_ids = torch.cat([encoding["input_ids"] for encoding in encodings], dim=0)
                attention_mask = torch.cat([encoding["attention_mask"] for encoding in encodings], dim=0)

            elif return_tensors == "tf" and is_tf_available():
                import tensorflow as tf

                input_ids = tf.stack([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = tf.stack([encoding["attention_mask"] for encoding in encodings], axis=0)

            else:
                raise ValueError("Target return tensor type could not be returned")

            encoding = BatchEncoding()
            encoding["input_ids"] = input_ids
            encoding["attention_mask"] = attention_mask

        if query_images is not None:
            encoding = BatchEncoding()
            query_pixel_values = self.image_processor(
                query_images, return_tensors=return_tensors, **kwargs
            ).pixel_values
            encoding["query_pixel_values"] = query_pixel_values

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif query_images is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None or query_images is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def post_process(self, *args, **kwargs):
        return self.image_processor.post_process(*args, **kwargs)

    def post_process_object_detection(self, *args, **kwargs):
        return self.image_processor.post_process_object_detection(*args, **kwargs)

    def post_process_image_guided_detection(self, *args, **kwargs):
        return self.image_processor.post_process_image_guided_detection(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
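# --- Usage sketch (added for illustration; "google/owlvit-base-patch32" is a
# public checkpoint, everything else is the API defined above) ---
# The processor pads nested text queries to the same length per image, so a
# batch can mix a different number of queries per image:
#
#     from PIL import Image
#     from transformers import OwlViTProcessor
#
#     processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
#     image = Image.new("RGB", (768, 768))
#     texts = [["a photo of a cat", "a photo of a dog"], ["a photo of a car"]]
#     inputs = processor(text=texts, images=[image, image], return_tensors="pt")
#     # The second image's single query was padded with " " to two queries:
#     assert inputs["input_ids"].shape[0] == 4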
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "sayakpaul/vit-msn-base": "https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json",
    # See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}


class ViTMSNConfig(PretrainedConfig):
    model_type = "vit_msn"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
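# --- Usage sketch (added for illustration) ---
# With the defaults above, a ViT-MSN encoder sees (224 / 16) ** 2 = 196
# patches plus the [CLS] token:
#
#     config = ViTMSNConfig()
#     num_patches = (config.image_size // config.patch_size) ** 2
#     assert num_patches == 196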
from ...processing_utils import ProcessorMixin
class WhisperProcessor(ProcessorMixin):
    feature_extractor_class = "WhisperFeatureExtractor"
    tokenizer_class = "WhisperTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def get_decoder_prompt_ids(self, task=None, language=None, no_timestamps=True):
        return self.tokenizer.get_decoder_prompt_ids(task=task, language=language, no_timestamps=no_timestamps)

    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    def get_prompt_ids(self, text: str, return_tensors="np"):
        return self.tokenizer.get_prompt_ids(text, return_tensors=return_tensors)
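# --- Usage sketch (added for illustration; "openai/whisper-tiny" is a real
# public checkpoint, the waveform is synthetic) ---
#
#     import numpy as np
#     from transformers import WhisperProcessor
#
#     processor = WhisperProcessor.from_pretrained("openai/whisper-tiny")
#     audio = np.zeros(16000, dtype=np.float32)  # 1 second of silence at 16 kHz
#     inputs = processor(audio=audio, sampling_rate=16000, text="hello", return_tensors="pt")
#     # `input_features` comes from the feature extractor, `labels` from the tokenizer.
#     assert "input_features" in inputs and "labels" in inputs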
import json
import os
from typing import Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"ctrl": "https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-vocab.json"},
    "merges_file": {"ctrl": "https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-merges.txt"},
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "ctrl": 256,
}

CONTROL_CODES = {
    "Pregnancy": 168629,
    "Christianity": 7675,
    "Explain": 106423,
    "Fitness": 63440,
    "Saving": 63163,
    "Ask": 27171,
    "Ass": 95985,
    "Joke": 163509,
    "Questions": 45622,
    "Thoughts": 49605,
    "Retail": 52342,
    "Feminism": 164338,
    "Writing": 11992,
    "Atheism": 192263,
    "Netflix": 48616,
    "Computing": 39639,
    "Opinion": 43213,
    "Alone": 44967,
    "Funny": 58917,
    "Gaming": 40358,
    "Human": 4088,
    "India": 1331,
    "Joker": 77138,
    "Diet": 36206,
    "Legal": 11859,
    "Norman": 4939,
    "Tip": 72689,
    "Weight": 52343,
    "Movies": 46273,
    "Running": 23425,
    "Science": 2090,
    "Horror": 37793,
    "Confession": 60572,
    "Finance": 12250,
    "Politics": 16360,
    "Scary": 191985,
    "Support": 12654,
    "Technologies": 32516,
    "Teenage": 66160,
    "Event": 32769,
    "Learned": 67460,
    "Notion": 182770,
    "Wikipedia": 37583,
    "Books": 6665,
    "Extract": 76050,
    "Confessions": 102701,
    "Conspiracy": 75932,
    "Links": 63674,
    "Narcissus": 150425,
    "Relationship": 54766,
    "Relationships": 134796,
    "Reviews": 41671,
    "News": 4256,
    "Translation": 26820,
    "multilingual": 128406,
}


def get_pairs(word):
    """Returns the set of adjacent symbol pairs in a word.

    A word is represented as a tuple of symbols (symbols being variable-length strings).
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char

    pairs = set(pairs)
    return pairs
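# A worked example (added for clarity): for the word ("l", "o", "w", "er</w>"),
# get_pairs returns {("l", "o"), ("o", "w"), ("w", "er</w>")}. The `bpe` method
# below then repeatedly merges the highest-ranked pair until no ranked pair remains.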
class CTRLTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    control_codes = CONTROL_CODES

    def __init__(self, vocab_file, merges_file, unk_token="<unk>", **kwargs):
        super().__init__(unk_token=unk_token, **kwargs)

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            merges = merges_handle.read().split("\n")[1:-1]
        merges = [tuple(merge.split()) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}

    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = "@@ ".join(word)
        word = word[:-4]
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        """Tokenize a string."""
        split_tokens = []

        words = re.findall(r"\S+\n?", text)

        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) to a single string."""
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1
        return vocab_file, merge_file

    # def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True):
    #     filtered_tokens = ' '.join(self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens))
    #     tokens_generated_so_far = re.sub('(@@ )', '', string=filtered_tokens)
    #     tokens_generated_so_far = re.sub('(@@ ?$)', '', string=tokens_generated_so_far)
    #     return ''.join(tokens_generated_so_far)
import argparse
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt
if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert."
    )
    parser.add_argument(
        "--original_config_file",
        type=str,
        required=True,
        help="The YAML config file corresponding to the original architecture.",
    )
    parser.add_argument(
        "--num_in_channels",
        default=None,
        type=int,
        help="The number of input channels. If `None` number of input channels will be automatically inferred.",
    )
    parser.add_argument(
        "--image_size",
        default=512,
        type=int,
        help=(
            "The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Diffusion v2"
            " Base. Use 768 for Stable Diffusion v2."
        ),
    )
    parser.add_argument(
        "--extract_ema",
        action="store_true",
        help=(
            "Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights"
            " or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield"
            " higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning."
        ),
    )
    parser.add_argument(
        "--upcast_attention",
        action="store_true",
        help=(
            "Whether the attention computation should always be upcasted. This is necessary when running stable"
            " diffusion 2.1."
        ),
    )
    parser.add_argument(
        "--from_safetensors",
        action="store_true",
        help="If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.",
    )
    parser.add_argument(
        "--to_safetensors",
        action="store_true",
        help="Whether to store pipeline in safetensors format or not.",
    )
    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
    parser.add_argument("--device", type=str, help="Device to use (e.g. cpu, cuda:0, cuda:1, etc.)")

    def parse_bool(string):
        if string == "True":
            return True
        elif string == "False":
            return False
        else:
            raise ValueError(f"could not parse string as bool {string}")

    parser.add_argument(
        "--use_linear_projection", help="Override for use linear projection", required=False, type=parse_bool
    )
    parser.add_argument("--cross_attention_dim", help="Override for cross attention_dim", required=False, type=int)

    args = parser.parse_args()

    controlnet = download_controlnet_from_original_ckpt(
        checkpoint_path=args.checkpoint_path,
        original_config_file=args.original_config_file,
        image_size=args.image_size,
        extract_ema=args.extract_ema,
        num_in_channels=args.num_in_channels,
        upcast_attention=args.upcast_attention,
        from_safetensors=args.from_safetensors,
        device=args.device,
        use_linear_projection=args.use_linear_projection,
        cross_attention_dim=args.cross_attention_dim,
    )

    controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
import shutil
import tempfile
import unittest
from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio
from .test_feature_extraction_clap import floats_list
@require_torchaudio
@require_sentencepiece
class ClapProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = "laion/clap-htsat-unfused"
        self.tmpdirname = tempfile.mkdtemp()

    def get_tokenizer(self, **kwargs):
        return RobertaTokenizer.from_pretrained(self.checkpoint, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return ClapFeatureExtractor.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        feature_extractor = self.get_feature_extractor()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        processor.save_pretrained(self.tmpdirname)
        processor = ClapProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.tokenizer, RobertaTokenizerFast)

        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor.to_json_string())
        self.assertIsInstance(processor.feature_extractor, ClapFeatureExtractor)

    def test_save_load_pretrained_additional_features(self):
        processor = ClapProcessor(tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        feature_extractor_add_kwargs = self.get_feature_extractor(do_normalize=False, padding_value=1.0)

        processor = ClapProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, RobertaTokenizerFast)

        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.feature_extractor, ClapFeatureExtractor)

    def test_feature_extractor(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        raw_speech = floats_list((3, 1000))

        input_feat_extract = feature_extractor(raw_speech, return_tensors="np")
        input_processor = processor(audios=raw_speech, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        input_str = "This is a test string"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_tokenizer_decode(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        self.assertListEqual(
            processor.model_input_names[2:],
            feature_extractor.model_input_names,
            msg="`processor` and `feature_extractor` model input names do not match",
        )
| 116
|
'''simple docstring'''
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def test_filelock(tmpdir):
    lock1 = FileLock(str(tmpdir / "foo.lock"))
    lock2 = FileLock(str(tmpdir / "foo.lock"))
    timeout = 0.01
    with lock1.acquire():
        with pytest.raises(Timeout):
            _start = time.time()
            lock2.acquire(timeout)
        assert time.time() - _start > timeout


def test_long_path(tmpdir):
    filename = "a" * 1_000 + ".lock"
    lock1 = FileLock(str(tmpdir / filename))
    assert lock1._lock_file.endswith(".lock")
    assert not lock1._lock_file.endswith(filename)
    assert len(os.path.basename(lock1._lock_file)) <= 255
    lock2 = FileLock(tmpdir / filename)
    with lock1.acquire():
        with pytest.raises(Timeout):
            lock2.acquire(0)
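# Note: most filesystems cap a single path component at 255 bytes, which is why the test
# above checks that FileLock shortens the basename of an over-long lock-file name.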
| 116
| 1
|
'''simple docstring'''
import numpy as np
from transformers import Pipeline
def softmax(outputs):
    maxes = np.max(outputs, axis=-1, keepdims=True)
    shifted_exp = np.exp(outputs - maxes)
    return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True)


class PairClassificationPipeline(Pipeline):
    def _sanitize_parameters(self, **kwargs):
        preprocess_kwargs = {}
        if "second_text" in kwargs:
            preprocess_kwargs["second_text"] = kwargs["second_text"]
        return preprocess_kwargs, {}, {}

    def preprocess(self, text, second_text=None):
        return self.tokenizer(text, text_pair=second_text, return_tensors=self.framework)

    def _forward(self, model_inputs):
        return self.model(**model_inputs)

    def postprocess(self, model_outputs):
        logits = model_outputs.logits[0].numpy()
        probabilities = softmax(logits)

        best_class = np.argmax(probabilities)
        label = self.model.config.id2label[best_class]
        score = probabilities[best_class].item()
        logits = logits.tolist()
        return {"label": label, "score": score, "logits": logits}
| 708
|
'''simple docstring'''
import argparse
import importlib
from pathlib import Path
# Test all the extensions added in the setup
FILES_TO_FIND = [
'''kernels/rwkv/wkv_cuda.cu''',
'''kernels/rwkv/wkv_op.cpp''',
'''kernels/deformable_detr/ms_deform_attn.h''',
'''kernels/deformable_detr/cuda/ms_deform_im2col_cuda.cuh''',
'''models/graphormer/algos_graphormer.pyx''',
]
def test_custom_files_are_present(transformers_path):
    """Checks whether all of the custom extension files are present."""
    for file in FILES_TO_FIND:
        if not (transformers_path / file).exists():
            return False
    return True


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_lib", action="store_true", help="Whether to check the build or the actual package.")
    args = parser.parse_args()
    if args.check_lib:
        transformers_module = importlib.import_module("transformers")
        transformers_path = Path(transformers_module.__file__).parent
    else:
        transformers_path = Path.cwd() / "build/lib/transformers"
    if not test_custom_files_are_present(transformers_path):
        raise ValueError("The built release does not contain the custom files. Fix this before going further!")
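# Usage sketch (the script path is illustrative): run from the repository root with
#   python utils/check_build.py              # checks build/lib/transformers
#   python utils/check_build.py --check_lib  # checks the installed `transformers` package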
| 686
| 0
|
"""simple docstring"""
import json
import os
from typing import Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"ctrl": "https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-vocab.json"},
    "merges_file": {"ctrl": "https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-merges.txt"},
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "ctrl": 2_56,
}

CONTROL_CODES = {
"Pregnancy": 16_86_29,
"Christianity": 76_75,
"Explain": 10_64_23,
"Fitness": 6_34_40,
"Saving": 6_31_63,
"Ask": 2_71_71,
"Ass": 9_59_85,
"Joke": 16_35_09,
"Questions": 4_56_22,
"Thoughts": 4_96_05,
"Retail": 5_23_42,
"Feminism": 16_43_38,
"Writing": 1_19_92,
"Atheism": 19_22_63,
"Netflix": 4_86_16,
"Computing": 3_96_39,
"Opinion": 4_32_13,
"Alone": 4_49_67,
"Funny": 5_89_17,
"Gaming": 4_03_58,
"Human": 40_88,
"India": 13_31,
"Joker": 7_71_38,
"Diet": 3_62_06,
"Legal": 1_18_59,
"Norman": 49_39,
"Tip": 7_26_89,
"Weight": 5_23_43,
"Movies": 4_62_73,
"Running": 2_34_25,
"Science": 20_90,
"Horror": 3_77_93,
"Confession": 6_05_72,
"Finance": 1_22_50,
"Politics": 1_63_60,
"Scary": 19_19_85,
"Support": 1_26_54,
"Technologies": 3_25_16,
"Teenage": 6_61_60,
"Event": 3_27_69,
"Learned": 6_74_60,
"Notion": 18_27_70,
"Wikipedia": 3_75_83,
"Books": 66_65,
"Extract": 7_60_50,
"Confessions": 10_27_01,
"Conspiracy": 7_59_32,
"Links": 6_36_74,
"Narcissus": 15_04_25,
"Relationship": 5_47_66,
"Relationships": 13_47_96,
"Reviews": 4_16_71,
"News": 42_56,
"Translation": 2_68_20,
"multilingual": 12_84_06,
}
def get_pairs(word):
    """Return the set of symbol pairs in a word, where a word is a tuple of symbols."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    pairs = set(pairs)
    return pairs


class CTRLTokenizer(PreTrainedTokenizer):
    """
    Construct a CTRL tokenizer, based on Byte-Pair Encoding.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    control_codes = CONTROL_CODES

    def __init__(self, vocab_file, merges_file, unk_token="<unk>", **kwargs):
        super().__init__(unk_token=unk_token, **kwargs)
        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            merges = merges_handle.read().split("\n")[1:-1]
        merges = [tuple(merge.split()) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}

    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
        pairs = get_pairs(word)
        if not pairs:
            return token
        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j
                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = "@@ ".join(word)
        word = word[:-4]
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        """Tokenize a string."""
        split_tokens = []
        words = re.findall(r"\S+\n?", text)
        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) to a single string."""
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )
        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1
        return vocab_file, merge_file
# def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True):
# filtered_tokens = ' '.join(self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens))
# tokens_generated_so_far = re.sub('(@@ )', '', string=filtered_tokens)
# tokens_generated_so_far = re.sub('(@@ ?$)', '', string=tokens_generated_so_far)
# return ''.join(tokens_generated_so_far)
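# Illustrative round trip (hypothetical tokens): if bpe("walking") returned "walk@@ ing",
# _tokenize would yield ["walk@@", "ing"], and convert_tokens_to_string strips the "@@ "
# continuation markers to recover "walking".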
| 363
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/rembert": "https://huggingface.co/google/rembert/resolve/main/sentencepiece.model",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/rembert": 2_56,
}
class RemBertTokenizer(PreTrainedTokenizer):
    """
    Construct a RemBERT tokenizer, based on SentencePiece.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=True,
        keep_accents=True,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text, sample=False):
        pieces = self.sp_model.EncodeAsPieces(text)
        return pieces

    def _convert_token_to_id(self, token):
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        out_string = self.sp_model.decode_pieces(tokens)
        return out_string

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
| 363
| 1
|
def remove_digit(num: int) -> int:
    """
    Returns the largest number that can be made by removing exactly one digit from num.

    >>> remove_digit(152)
    52
    """
    if not isinstance(num, int):
        raise TypeError("only integers accepted as input")
    else:
        num_string = str(abs(num))
        num_transpositions = [list(num_string) for _ in range(len(num_string))]
        for index in range(len(num_string)):
            num_transpositions[index].pop(index)
        return max(int("".join(transposition)) for transposition in num_transpositions)
if __name__ == "__main__":
__import__('''doctest''').testmod()
| 149
|
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

# TODO Update this
ESM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''facebook/esm-1b''': '''https://huggingface.co/facebook/esm-1b/resolve/main/config.json''',
# See all ESM models at https://huggingface.co/models?filter=esm
}
class EsmConfig(PretrainedConfig):
    model_type = "esm"

    def __init__(
        self,
        vocab_size=None,
        mask_token_id=None,
        pad_token_id=None,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1026,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        position_embedding_type="absolute",
        use_cache=True,
        emb_layer_norm_before=None,
        token_dropout=False,
        is_folding_model=False,
        esmfold_config=None,
        vocab_list=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, mask_token_id=mask_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.emb_layer_norm_before = emb_layer_norm_before
        self.token_dropout = token_dropout
        self.is_folding_model = is_folding_model
        if is_folding_model:
            if esmfold_config is None:
                logger.info("No esmfold_config supplied for folding model, using default values.")
                esmfold_config = EsmFoldConfig()
            elif isinstance(esmfold_config, dict):
                esmfold_config = EsmFoldConfig(**esmfold_config)
            self.esmfold_config = esmfold_config
            if vocab_list is None:
                logger.warning("No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!")
                self.vocab_list = get_default_vocab_list()
            else:
                self.vocab_list = vocab_list
        else:
            self.esmfold_config = None
            self.vocab_list = None
        if self.esmfold_config is not None and getattr(self.esmfold_config, "use_esm_attn_map", False):
            raise ValueError("The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!")

    def to_dict(self):
        output = super().to_dict()
        if isinstance(self.esmfold_config, EsmFoldConfig):
            output["esmfold_config"] = self.esmfold_config.to_dict()
        return output
@dataclass
class EsmFoldConfig:
    esm_type: str = None
    fp16_esm: bool = True
    use_esm_attn_map: bool = False
    esm_ablate_pairwise: bool = False
    esm_ablate_sequence: bool = False
    esm_input_dropout: float = 0

    embed_aa: bool = True
    bypass_lm: bool = False

    lddt_head_hid_dim: int = 1_28
    trunk: "TrunkConfig" = None

    def __post_init__(self):
        if self.trunk is None:
            self.trunk = TrunkConfig()
        elif isinstance(self.trunk, dict):
            self.trunk = TrunkConfig(**self.trunk)

    def to_dict(self):
        output = asdict(self)
        output["trunk"] = self.trunk.to_dict()
        return output
@dataclass
class TrunkConfig:
    num_blocks: int = 48
    sequence_state_dim: int = 10_24
    pairwise_state_dim: int = 1_28
    sequence_head_width: int = 32
    pairwise_head_width: int = 32
    position_bins: int = 32
    dropout: float = 0
    layer_drop: float = 0
    cpu_grad_checkpoint: bool = False
    max_recycles: int = 4
    chunk_size: Optional[int] = 1_28
    structure_module: "StructureModuleConfig" = None

    def __post_init__(self):
        if self.structure_module is None:
            self.structure_module = StructureModuleConfig()
        elif isinstance(self.structure_module, dict):
            self.structure_module = StructureModuleConfig(**self.structure_module)

        if self.max_recycles <= 0:
            raise ValueError(f"`max_recycles` should be positive, got {self.max_recycles}.")
        if self.sequence_state_dim % self.sequence_head_width != 0:
            raise ValueError(
                "`sequence_state_dim` should be a round multiple of `sequence_head_width`, got"
                f" {self.sequence_state_dim} and {self.sequence_head_width}."
            )
        if self.pairwise_state_dim % self.pairwise_head_width != 0:
            raise ValueError(
                "`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got"
                f" {self.pairwise_state_dim} and {self.pairwise_head_width}."
            )

        sequence_num_heads = self.sequence_state_dim // self.sequence_head_width
        pairwise_num_heads = self.pairwise_state_dim // self.pairwise_head_width

        if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
            raise ValueError(
                "`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width`, got"
                f" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}."
            )
        if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
            raise ValueError(
                "`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width`, got"
                f" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}."
            )
        if self.pairwise_state_dim % 2 != 0:
            raise ValueError(f"`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.")
        if self.dropout >= 0.4:
            raise ValueError(f"`dropout` should not be greater than 0.4, got {self.dropout}.")

    def to_dict(self):
        output = asdict(self)
        output["structure_module"] = self.structure_module.to_dict()
        return output
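# Worked check with the defaults above: sequence_state_dim=1024 and sequence_head_width=32
# give 1024 // 32 = 32 sequence heads (32 * 32 == 1024), while pairwise_state_dim=128 and
# pairwise_head_width=32 give 128 // 32 = 4 pairwise heads (4 * 32 == 128), so both
# `state_dim == num_heads * head_width` validations hold.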
@dataclass
class StructureModuleConfig:
    sequence_dim: int = 3_84
    pairwise_dim: int = 1_28
    ipa_dim: int = 16
    resnet_dim: int = 1_28
    num_heads_ipa: int = 12
    num_qk_points: int = 4
    num_v_points: int = 8
    dropout_rate: float = 0.1
    num_blocks: int = 8
    num_transition_layers: int = 1
    num_resnet_blocks: int = 2
    num_angles: int = 7
    trans_scale_factor: int = 10
    epsilon: float = 1e-8
    inf: float = 1e5

    def to_dict(self):
        return asdict(self)


def get_default_vocab_list():
    return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
| 149
| 1
|
from math import ceil, sqrt
def solution(limit: int = 1_00_00_00) -> int:
    """
    Counts the hollow square laminae that can be formed with up to `limit` tiles.
    """
    answer = 0
    for outer_width in range(3, (limit // 4) + 2):
        if outer_width**2 > limit:
            hole_width_lower_bound = max(ceil(sqrt(outer_width**2 - limit)), 1)
        else:
            hole_width_lower_bound = 1
        if (outer_width - hole_width_lower_bound) % 2:
            hole_width_lower_bound += 1
        answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1
    return answer
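# Sanity check from the problem statement (Project Euler 173): with up to one hundred tiles
# exactly forty-one hollow square laminae can be formed, i.e. solution(100) == 41.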
if __name__ == "__main__":
print(f'''{solution() = }''')
| 12
|
'''simple docstring'''
import os
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_doctest_list.py
REPO_PATH = "."
if __name__ == "__main__":
    doctest_file_path = os.path.join(REPO_PATH, "utils/documentation_tests.txt")
    non_existent_paths = []
    all_paths = []
    with open(doctest_file_path) as fp:
        for line in fp:
            line = line.strip()
            path = os.path.join(REPO_PATH, line)
            if not (os.path.isfile(path) or os.path.isdir(path)):
                non_existent_paths.append(line)
            all_paths.append(path)
    if len(non_existent_paths) > 0:
        non_existent_paths = "\n".join(non_existent_paths)
        raise ValueError(f"`utils/documentation_tests.txt` contains non-existent paths:\n{non_existent_paths}")
if all_paths != sorted(all_paths):
raise ValueError('''Files in `utils/documentation_tests.txt` are not in alphabetical order.''')
| 209
| 0
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'xlm-mlm-en-2048': 'https://huggingface.co/xlm-mlm-en-2048/resolve/main/config.json',
'xlm-mlm-ende-1024': 'https://huggingface.co/xlm-mlm-ende-1024/resolve/main/config.json',
'xlm-mlm-enfr-1024': 'https://huggingface.co/xlm-mlm-enfr-1024/resolve/main/config.json',
'xlm-mlm-enro-1024': 'https://huggingface.co/xlm-mlm-enro-1024/resolve/main/config.json',
'xlm-mlm-tlm-xnli15-1024': 'https://huggingface.co/xlm-mlm-tlm-xnli15-1024/resolve/main/config.json',
'xlm-mlm-xnli15-1024': 'https://huggingface.co/xlm-mlm-xnli15-1024/resolve/main/config.json',
'xlm-clm-enfr-1024': 'https://huggingface.co/xlm-clm-enfr-1024/resolve/main/config.json',
'xlm-clm-ende-1024': 'https://huggingface.co/xlm-clm-ende-1024/resolve/main/config.json',
'xlm-mlm-17-1280': 'https://huggingface.co/xlm-mlm-17-1280/resolve/main/config.json',
'xlm-mlm-100-1280': 'https://huggingface.co/xlm-mlm-100-1280/resolve/main/config.json',
}
class XLMConfig(PretrainedConfig):
    model_type = "xlm"
    attribute_map = {
        "hidden_size": "emb_dim",
        "num_attention_heads": "n_heads",
        "num_hidden_layers": "n_layers",
        "n_words": "vocab_size",  # For backward compatibility
    }

    def __init__(
        self,
        vocab_size=3_0145,
        emb_dim=2048,
        n_layers=12,
        n_heads=16,
        dropout=0.1,
        attention_dropout=0.1,
        gelu_activation=True,
        sinusoidal_embeddings=False,
        causal=False,
        asm=False,
        n_langs=1,
        use_lang_emb=True,
        max_position_embeddings=512,
        embed_init_std=2048**-0.5,
        layer_norm_eps=1e-12,
        init_std=0.02,
        bos_index=0,
        eos_index=1,
        pad_index=2,
        unk_index=3,
        mask_index=5,
        is_encoder=True,
        summary_type="first",
        summary_use_proj=True,
        summary_activation=None,
        summary_proj_to_labels=True,
        summary_first_dropout=0.1,
        start_n_top=5,
        end_n_top=5,
        mask_token_id=0,
        lang_id=0,
        pad_token_id=2,
        bos_token_id=0,
        **kwargs,
    ):
        """Constructs XLMConfig."""
        self.vocab_size = vocab_size
        self.emb_dim = emb_dim
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.use_lang_emb = use_lang_emb
        self.layer_norm_eps = layer_norm_eps
        self.bos_index = bos_index
        self.eos_index = eos_index
        self.pad_index = pad_index
        self.unk_index = unk_index
        self.mask_index = mask_index
        self.is_encoder = is_encoder
        self.max_position_embeddings = max_position_embeddings
        self.embed_init_std = embed_init_std
        self.init_std = init_std
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_proj_to_labels = summary_proj_to_labels
        self.summary_first_dropout = summary_first_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top
        self.mask_token_id = mask_token_id
        self.lang_id = lang_id

        if "n_words" in kwargs:
            self.n_words = kwargs["n_words"]

        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, **kwargs)
class XLMOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
| 449
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'configuration_clip': [
'CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'CLIPConfig',
'CLIPOnnxConfig',
'CLIPTextConfig',
'CLIPVisionConfig',
],
'processing_clip': ['CLIPProcessor'],
'tokenization_clip': ['CLIPTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_clip_fast"] = ["CLIPTokenizerFast"]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_clip"] = ["CLIPFeatureExtractor"]
    _import_structure["image_processing_clip"] = ["CLIPImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clip"] = [
'CLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'CLIPModel',
'CLIPPreTrainedModel',
'CLIPTextModel',
'CLIPTextModelWithProjection',
'CLIPVisionModel',
'CLIPVisionModelWithProjection',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_clip"] = [
'TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFCLIPModel',
'TFCLIPPreTrainedModel',
'TFCLIPTextModel',
'TFCLIPVisionModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_clip"] = [
'FlaxCLIPModel',
'FlaxCLIPPreTrainedModel',
'FlaxCLIPTextModel',
'FlaxCLIPTextPreTrainedModel',
'FlaxCLIPVisionModel',
'FlaxCLIPVisionPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
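# Note: `_LazyModule` replaces this module in `sys.modules`, so the torch/TF/Flax model
# classes listed in `_import_structure` are only imported on first attribute access.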
| 449
| 1
|
'''simple docstring'''
from math import pi
def arc_length(angle: int, radius: int) -> float:
    """Calculates the length of a circular arc from its central angle (in degrees) and radius."""
    return 2 * pi * radius * (angle / 360)
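# Sanity check: arc_length(90, 10) = 2 * pi * 10 * (90 / 360) = 5 * pi ≈ 15.70796.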
if __name__ == "__main__":
print(arc_length(90, 10))
| 358
|
'''simple docstring'''
def solution() -> int:
    """
    Returns the product of the digits d1 * d10 * d100 * ... * d1000000 of the fractional
    part of Champernowne's constant 0.123456789101112...
    """
    constant = []
    i = 1
    while len(constant) < 1e6:
        constant.append(str(i))
        i += 1
    constant = "".join(constant)
    return (
        int(constant[0])
        * int(constant[9])
        * int(constant[99])
        * int(constant[999])
        * int(constant[9999])
        * int(constant[99999])
        * int(constant[999999])
    )
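# For reference, this is Project Euler problem 40; the product of those seven digits is 210.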
if __name__ == "__main__":
print(solution())
| 358
| 1
|
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
    from transformers import (
        MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
        MODEL_FOR_QUESTION_ANSWERING_MAPPING,
        MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
        MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
        LayoutLMv3Config,
        LayoutLMv3ForQuestionAnswering,
        LayoutLMv3ForSequenceClassification,
        LayoutLMv3ForTokenClassification,
        LayoutLMv3Model,
    )
    from transformers.models.layoutlmv3.modeling_layoutlmv3 import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST

if is_vision_available():
    from PIL import Image

    from transformers import LayoutLMv3ImageProcessor
class LayoutLMv3ModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        num_channels=3,
        image_size=4,
        patch_size=2,
        text_seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=36,
        num_hidden_layers=3,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        coordinate_size=6,
        shape_size=6,
        num_labels=3,
        num_choices=4,
        scope=None,
        range_bbox=1_000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.text_seq_length = text_seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox
        # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
        self.text_seq_length = text_seq_length
        self.image_seq_length = (image_size // patch_size) ** 2 + 1
        self.seq_length = self.text_seq_length + self.image_seq_length
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.text_seq_length], self.vocab_size)
        bbox = ids_tensor([self.batch_size, self.text_seq_length, 4], self.range_bbox)
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t

        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.text_seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.text_seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.text_seq_length], self.num_labels)

        config = LayoutLMv3Config(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            coordinate_size=self.coordinate_size,
            shape_size=self.shape_size,
            input_size=self.image_size,
            patch_size=self.patch_size,
        )

        return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    def create_and_check_model(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LayoutLMv3Model(config=config)
        model.to(torch_device)
        model.eval()

        # text + image
        result = model(input_ids, pixel_values=pixel_values)
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids
        )
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

        # text only
        result = model(input_ids)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.text_seq_length, self.hidden_size)
        )

        # image only
        result = model(pixel_values=pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.image_seq_length, self.hidden_size)
        )
    def create_and_check_for_sequence_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        config.num_labels = self.num_labels
        model = LayoutLMv3ForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        config.num_labels = self.num_labels
        model = LayoutLMv3ForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.text_seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LayoutLMv3ForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "pixel_values": pixel_values,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class LayoutLMv3ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    test_pruning = False
    test_torchscript = False
    test_mismatched_shapes = False

    all_model_classes = (
        (
            LayoutLMv3Model,
            LayoutLMv3ForSequenceClassification,
            LayoutLMv3ForTokenClassification,
            LayoutLMv3ForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"document-question-answering": LayoutLMv3ForQuestionAnswering, "feature-extraction": LayoutLMv3Model}
        if is_torch_available()
        else {}
    )

    def is_pipeline_test_to_skip(
        self, pipeline_test_case_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        return True
    def setUp(self):
        self.model_tester = LayoutLMv3ModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LayoutLMv3Config, hidden_size=37)

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = copy.deepcopy(inputs_dict)
        if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
            inputs_dict = {
                k: v.unsqueeze(1).expand(-1, self.model_tester.num_choices, -1).contiguous()
                if isinstance(v, torch.Tensor) and v.ndim > 1
                else v
                for k, v in inputs_dict.items()
            }
        if return_labels:
            if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
                inputs_dict["labels"] = torch.ones(self.model_tester.batch_size, dtype=torch.long, device=torch_device)
            elif model_class in get_values(MODEL_FOR_QUESTION_ANSWERING_MAPPING):
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
            elif model_class in [
                *get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING),
            ]:
                inputs_dict["labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
            elif model_class in [
                *get_values(MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING),
            ]:
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length),
                    dtype=torch.long,
                    device=torch_device,
                )
        return inputs_dict
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LayoutLMv3Model.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
class LayoutLMv3ModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return LayoutLMv3ImageProcessor(apply_ocr=False) if is_vision_available() else None

    @slow
    def test_inference_no_head(self):
        model = LayoutLMv3Model.from_pretrained("microsoft/layoutlmv3-base").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="pt").pixel_values.to(torch_device)

        input_ids = torch.tensor([[1, 2]])
        bbox = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]]).unsqueeze(0)

        # forward pass
        outputs = model(
            input_ids=input_ids.to(torch_device),
            bbox=bbox.to(torch_device),
            pixel_values=pixel_values.to(torch_device),
        )

        # verify the logits
        expected_shape = torch.Size((1, 199, 768))
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]]
        ).to(torch_device)

        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))
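# Shape sanity check (sketch): the tester uses image_size=4 and patch_size=2, so
# image_seq_length = (4 // 2) ** 2 + 1 = 5 and seq_length = 7 + 5 = 12. In the integration
# test, microsoft/layoutlmv3-base encodes 224x224 images in 16x16 patches:
# (224 // 16) ** 2 = 196 patches + 1 CLS token + 2 text tokens = 199 positions, which
# matches the expected (1, 199, 768) output shape.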
| 264
|
def solution(n: int = 600_851_475_143) -> int:
    """
    Returns the largest prime factor of n.
    """
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError("Parameter n must be int or castable to int.")
    if n <= 0:
        raise ValueError("Parameter n must be greater than or equal to one.")
    max_number = 1
    i = 2
    while i * i <= n:
        while n % i == 0:
            max_number = i
            n //= i
        i += 1
    if n > 1:
        max_number = n
    return int(max_number)
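# Sanity check from the problem statement (Project Euler 3): the prime factors of 13195 are
# 5, 7, 13 and 29, so solution(13195) == 29.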
if __name__ == "__main__":
print(f"""{solution() = }""")
| 264
| 1
|
import string
import numpy
def greatest_common_divisor(a: int, b: int) -> int:
    return b if a == 0 else greatest_common_divisor(b % a, a)


class HillCipher:
    key_string = string.ascii_uppercase + string.digits
    # This cipher takes alphanumerics into account
    # i.e. a total of 36 characters

    # take x and return x % len(key_string)
    modulus = numpy.vectorize(lambda x: x % 36)

    to_int = numpy.vectorize(round)

    def __init__(self, encrypt_key: numpy.ndarray) -> None:
        """encrypt_key is an NxN numpy array"""
        self.encrypt_key = self.modulus(encrypt_key)  # mod36 calc's on the encrypt key
        self.check_determinant()  # validate the determinant of the encryption key
        self.break_key = encrypt_key.shape[0]

    def replace_letters(self, letter: str) -> int:
        return self.key_string.index(letter)

    def replace_digits(self, num: int) -> str:
        return self.key_string[round(num)]

    def check_determinant(self) -> None:
        det = round(numpy.linalg.det(self.encrypt_key))
        if det < 0:
            det = det % len(self.key_string)
        req_l = len(self.key_string)
        if greatest_common_divisor(det, len(self.key_string)) != 1:
            msg = (
                f"determinant modular {req_l} of encryption key({det}) "
                f"is not co prime w.r.t {req_l}.\nTry another key."
            )
            raise ValueError(msg)

    def process_text(self, text: str) -> str:
        chars = [char for char in text.upper() if char in self.key_string]
        last = chars[-1]
        while len(chars) % self.break_key != 0:
            chars.append(last)
        return "".join(chars)

    def encrypt(self, text: str) -> str:
        text = self.process_text(text.upper())
        encrypted = ""
        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_encrypted = self.modulus(self.encrypt_key.dot(batch_vec)).T.tolist()[
                0
            ]
            encrypted_batch = "".join(
                self.replace_digits(num) for num in batch_encrypted
            )
            encrypted += encrypted_batch
        return encrypted

    def make_decrypt_key(self) -> numpy.ndarray:
        det = round(numpy.linalg.det(self.encrypt_key))
        if det < 0:
            det = det % len(self.key_string)
        det_inv = None
        for i in range(len(self.key_string)):
            if (det * i) % len(self.key_string) == 1:
                det_inv = i
                break
        inv_key = (
            det_inv
            * numpy.linalg.det(self.encrypt_key)
            * numpy.linalg.inv(self.encrypt_key)
        )
        return self.to_int(self.modulus(inv_key))

    def decrypt(self, text: str) -> str:
        decrypt_key = self.make_decrypt_key()
        text = self.process_text(text.upper())
        decrypted = ""
        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_decrypted = self.modulus(decrypt_key.dot(batch_vec)).T.tolist()[0]
            decrypted_batch = "".join(
                self.replace_digits(num) for num in batch_decrypted
            )
            decrypted += decrypted_batch
        return decrypted


def main() -> None:
    n = int(input("Enter the order of the encryption key: "))
    hill_matrix = []
    print("Enter each row of the encryption key with space separated integers")
    for _ in range(n):
        row = [int(x) for x in input().split()]
        hill_matrix.append(row)
    hc = HillCipher(numpy.array(hill_matrix))
    print("Would you like to encrypt or decrypt some text? (1 or 2)")
    option = input("\n1. Encrypt\n2. Decrypt\n")
    if option == "1":
        text_e = input("What text would you like to encrypt?: ")
        print("Your encrypted text is:")
        print(hc.encrypt(text_e))
    elif option == "2":
        text_d = input("What text would you like to decrypt?: ")
        print("Your decrypted text is:")
        print(hc.decrypt(text_d))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
| 377
|
def fibonacci(n: int) -> int:
    if n == 1 or not isinstance(n, int):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2, n + 1):
            sequence.append(sequence[i - 1] + sequence[i - 2])
        return sequence[n]


def fibonacci_digits_index(n: int) -> int:
    digits = 0
    index = 2
    while digits < n:
        index += 1
        digits = len(str(fibonacci(index)))
    return index


def solution(n: int = 1_0_0_0) -> int:
    return fibonacci_digits_index(n)
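# Sanity checks: F(12) = 144 is the first 3-digit Fibonacci number, so
# fibonacci_digits_index(3) == 12; Project Euler 25's answer, solution(1000), is 4782.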
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 377
| 1
|
'''simple docstring'''
import argparse
import logging
from collections import namedtuple
import torch
from model_bertabs import BertAbsSummarizer
from models.model_builder import AbsSummarizer # The authors' implementation
from transformers import BertTokenizer
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

SAMPLE_TEXT = "Hello world! cécé herlolip"

BertAbsConfig = namedtuple(
    "BertAbsConfig",
[
'''temp_dir''',
'''large''',
'''use_bert_emb''',
'''finetune_bert''',
'''encoder''',
'''share_emb''',
'''max_pos''',
'''enc_layers''',
'''enc_hidden_size''',
'''enc_heads''',
'''enc_ff_size''',
'''enc_dropout''',
'''dec_layers''',
'''dec_hidden_size''',
'''dec_heads''',
'''dec_ff_size''',
'''dec_dropout''',
],
)
def convert_bertabs_checkpoints(path_to_checkpoints: str, dump_path: str) -> None:
    """Copy/paste and tweak the pre-trained weights provided by the creators
    of BertAbs for the internal architecture.
    """
    # Instantiate the authors' model with the pre-trained weights
    config = BertAbsConfig(
        temp_dir=".",
        finetune_bert=False,
        large=False,
        share_emb=True,
        use_bert_emb=False,
        encoder="bert",
        max_pos=512,
        enc_layers=6,
        enc_hidden_size=512,
        enc_heads=8,
        enc_ff_size=512,
        enc_dropout=0.2,
        dec_layers=6,
        dec_hidden_size=768,
        dec_heads=8,
        dec_ff_size=2048,
        dec_dropout=0.2,
    )
    checkpoints = torch.load(path_to_checkpoints, lambda storage, loc: storage)
    original = AbsSummarizer(config, torch.device("cpu"), checkpoints)
    original.eval()

    new_model = BertAbsSummarizer(config, torch.device("cpu"))
    new_model.eval()

    # -------------------
    # Convert the weights
    # -------------------

    logging.info("convert the model")
    new_model.bert.load_state_dict(original.bert.state_dict())
    new_model.decoder.load_state_dict(original.decoder.state_dict())
    new_model.generator.load_state_dict(original.generator.state_dict())

    # -----------------------------------
    # Make sure the outputs are identical
    # -----------------------------------

    logging.info("Make sure that the models' outputs are identical")
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")

    # prepare the model inputs
    encoder_input_ids = tokenizer.encode("This is sample éàalj'-.")
    encoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(encoder_input_ids)))
    encoder_input_ids = torch.tensor(encoder_input_ids).unsqueeze(0)
    decoder_input_ids = tokenizer.encode("This is sample 3 éàalj'-.")
    decoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(decoder_input_ids)))
    decoder_input_ids = torch.tensor(decoder_input_ids).unsqueeze(0)

    # failsafe to make sure the weights reset does not affect the
    # loaded weights.
    assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight)) == 0

    # forward pass
    src = encoder_input_ids
    tgt = decoder_input_ids
    segs = token_type_ids = None
    clss = None
    mask_src = encoder_attention_mask = None
    mask_tgt = decoder_attention_mask = None
    mask_cls = None

    # The original model does not apply the generator layer immediately but rather in
    # the beam search (where it combines softmax + linear layer). Since we already
    # apply the softmax in our generation process we only apply the linear layer here.
    # We make sure that the outputs of the full stack are identical
    output_original_model = original(src, tgt, segs, clss, mask_src, mask_tgt, mask_cls)[0]
    output_original_generator = original.generator(output_original_model)

    output_converted_model = new_model(
        encoder_input_ids, decoder_input_ids, token_type_ids, encoder_attention_mask, decoder_attention_mask
    )[0]
    output_converted_generator = new_model.generator(output_converted_model)

    maximum_absolute_difference = torch.max(torch.abs(output_converted_model - output_original_model)).item()
    print("Maximum absolute difference between the models' outputs: {:.2f}".format(maximum_absolute_difference))
    maximum_absolute_difference = torch.max(torch.abs(output_converted_generator - output_original_generator)).item()
    print("Maximum absolute difference between the generators' outputs: {:.2f}".format(maximum_absolute_difference))

    are_identical = torch.allclose(output_converted_generator, output_original_generator, atol=1e-3)
    if are_identical:
        logging.info("all weights are equal up to 1e-3")
    else:
        raise ValueError("the weights are different. The new model is likely different from the original one.")

    # The model has been saved with torch.save(model) and this is bound to the exact
    # directory structure. We save the state_dict instead.
    logging.info("saving the model's state dictionary")
    torch.save(
        new_model.state_dict(), "./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin"
    )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--bertabs_checkpoint_path''',
default=None,
type=str,
required=True,
help='''Path the official PyTorch dump.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
required=True,
help='''Path to the output PyTorch model.''',
)
    args = parser.parse_args()
convert_bertabs_checkpoints(
args.bertabs_checkpoint_path,
args.pytorch_dump_folder_path,
)
| 605
|
'''simple docstring'''
from __future__ import annotations
from typing import Any
def evaluate_postfix(postfix_notation: list) -> int:
    """
    >>> evaluate_postfix(["2", "1", "+", "3", "*"])
    9
    >>> evaluate_postfix(["4", "13", "5", "/", "+"])
    6
    """
    if not postfix_notation:
        return 0

    operations = {"+", "-", "*", "/"}
    stack = []

    for token in postfix_notation:
        if token in operations:
            b, a = stack.pop(), stack.pop()
            if token == "+":
                stack.append(a + b)
            elif token == "-":
                stack.append(a - b)
            elif token == "*":
                stack.append(a * b)
            else:
                if a * b < 0 and a % b != 0:
                    stack.append(a // b + 1)
                else:
                    stack.append(a // b)
        else:
            stack.append(int(token))

    return stack.pop()
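# Example: evaluate_postfix(["2", "1", "+", "3", "*"]) computes (2 + 1) * 3 = 9. The
# division branch truncates toward zero (C-style) rather than flooring.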
if __name__ == "__main__":
import doctest
doctest.testmod()
| 605
| 1
|
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config
# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
EXCLUDE_EXAMPLES = [
'''cross_validation.py''',
'''gradient_accumulation.py''',
'''local_sgd.py''',
'''multi_process_metrics.py''',
'''memory.py''',
'''automatic_gradient_accumulation.py''',
'''fsdp_with_peak_mem_tracking.py''',
'''deepspeed_with_config_support.py''',
'''megatron_lm_gpt_pretraining.py''',
]
class ExampleDifferenceTests(unittest.TestCase):
    def one_complete_example(
        self, complete_file_name: str, parser_only: bool, secondary_filename: str = None, special_strings: list = None
    ):
        self.maxDiff = None
        by_feature_path = os.path.abspath(os.path.join('examples', 'by_feature'))
        examples_path = os.path.abspath('examples')
        for item in os.listdir(by_feature_path):
            if item not in EXCLUDE_EXAMPLES:
                item_path = os.path.join(by_feature_path, item)
                if os.path.isfile(item_path) and ".py" in item_path:
                    with self.subTest(
                        tested_script=complete_file_name,
                        feature_script=item,
                        tested_section='main()' if parser_only else 'training_function()',
                    ):
                        diff = compare_against_test(
                            os.path.join(examples_path, complete_file_name), item_path, parser_only, secondary_filename
                        )
                        diff = '\n'.join(diff)
                        if special_strings is not None:
                            for string in special_strings:
                                diff = diff.replace(string, '')
                        self.assertEqual(diff, '')

    def test_nlp_examples(self):
        self.one_complete_example('complete_nlp_example.py', True)
        self.one_complete_example('complete_nlp_example.py', False)

    def test_cv_examples(self):
        cv_path = os.path.abspath(os.path.join('examples', 'cv_example.py'))
        special_strings = [
            ' ' * 16 + '{\n\n',
            ' ' * 20 + '"accuracy": eval_metric["accuracy"],\n\n',
            ' ' * 20 + '"f1": eval_metric["f1"],\n\n',
            ' ' * 20 + '"train_loss": total_loss.item() / len(train_dataloader),\n\n',
            ' ' * 20 + '"epoch": epoch,\n\n',
            ' ' * 16 + '},\n\n',
            ' ' * 16 + 'step=epoch,\n',
            ' ' * 12,
            ' ' * 8 + 'for step, batch in enumerate(active_dataloader):\n',
        ]
        self.one_complete_example('complete_cv_example.py', True, cv_path, special_strings)
        self.one_complete_example('complete_cv_example.py', False, cv_path, special_strings)
@mock.patch.dict(os.environ , {"""TESTING_MOCKED_DATALOADERS""": """1"""} )
class FeatureExamplesTests(TempDirTestCase):
    clean_on_exit = False

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        cls._tmpdir = tempfile.mkdtemp()
        cls.configPath = os.path.join(cls._tmpdir, 'default_config.yml')

        write_basic_config(save_location=cls.configPath)
        cls._launch_args = ['accelerate', 'launch', '--config_file', cls.configPath]

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        shutil.rmtree(cls._tmpdir)
    def test_checkpointing_by_epoch(self):
        testargs = f'''
        examples/by_feature/checkpointing.py
        --checkpointing_steps epoch
        --output_dir {self.tmpdir}
        '''.split()
        run_command(self._launch_args + testargs)
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir, 'epoch_0')))

    def test_checkpointing_by_steps(self):
        testargs = f'''
        examples/by_feature/checkpointing.py
        --checkpointing_steps 1
        --output_dir {self.tmpdir}
        '''.split()
        _ = run_command(self._launch_args + testargs)
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir, 'step_2')))

    def test_load_states_by_epoch(self):
        testargs = f'''
        examples/by_feature/checkpointing.py
        --resume_from_checkpoint {os.path.join(self.tmpdir, 'epoch_0')}
        '''.split()
        output = run_command(self._launch_args + testargs, return_stdout=True)
        self.assertNotIn('epoch 0:', output)
        self.assertIn('epoch 1:', output)

    def test_load_states_by_steps(self):
        testargs = f'''
        examples/by_feature/checkpointing.py
        --resume_from_checkpoint {os.path.join(self.tmpdir, 'step_2')}
        '''.split()
        output = run_command(self._launch_args + testargs, return_stdout=True)
        if torch.cuda.is_available():
            num_processes = torch.cuda.device_count()
        else:
            num_processes = 1
        if num_processes > 1:
            self.assertNotIn('epoch 0:', output)
            self.assertIn('epoch 1:', output)
        else:
            self.assertIn('epoch 0:', output)
            self.assertIn('epoch 1:', output)

    @slow
    def test_cross_validation(self):
        testargs = '\n        examples/by_feature/cross_validation.py\n        --num_folds 2\n        '.split()
        with mock.patch.dict(os.environ, {'TESTING_MOCKED_DATALOADERS': '0'}):
            output = run_command(self._launch_args + testargs, return_stdout=True)
            results = re.findall('({.+})', output)
            results = [r for r in results if 'accuracy' in r][-1]
            results = ast.literal_eval(results)
            self.assertGreaterEqual(results['accuracy'], 0.75)

    def test_multi_process_metrics(self):
        testargs = ['examples/by_feature/multi_process_metrics.py']
        run_command(self._launch_args + testargs)

    @require_trackers
    @mock.patch.dict(os.environ, {'WANDB_MODE': 'offline'})
    def test_tracking(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            testargs = f'''
            examples/by_feature/tracking.py
            --with_tracking
            --project_dir {tmpdir}
            '''.split()
            run_command(self._launch_args + testargs)
            self.assertTrue(os.path.exists(os.path.join(tmpdir, 'tracking')))

    def test_gradient_accumulation(self):
        testargs = ['examples/by_feature/gradient_accumulation.py']
        run_command(self._launch_args + testargs)

    def test_local_sgd(self):
        testargs = ['examples/by_feature/local_sgd.py']
        run_command(self._launch_args + testargs)
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_clap""": [
"""CLAP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ClapAudioConfig""",
"""ClapConfig""",
"""ClapTextConfig""",
],
"""processing_clap""": ["""ClapProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_clap"""] = [
"""CLAP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ClapModel""",
"""ClapPreTrainedModel""",
"""ClapTextModel""",
"""ClapTextModelWithProjection""",
"""ClapAudioModel""",
"""ClapAudioModelWithProjection""",
]
    _import_structure["""feature_extraction_clap"""] = ["""ClapFeatureExtractor"""]
if TYPE_CHECKING:
from .configuration_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioConfig,
ClapConfig,
ClapTextConfig,
)
from .processing_clap import ClapProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clap import ClapFeatureExtractor
from .modeling_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioModel,
ClapAudioModelWithProjection,
ClapModel,
ClapPreTrainedModel,
ClapTextModel,
ClapTextModelWithProjection,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
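# Illustrative note on the lazy-module pattern above (a sketch, not part of the original file):
# importing the package stays cheap, and torch-backed symbols listed in `_import_structure`
# are only materialized on first attribute access, e.g.:
#
#     from transformers.models.clap import ClapConfig   # no torch import triggered
#     from transformers.models.clap import ClapModel    # torch is imported here, on demand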
| 229
| 0
|
'''simple docstring'''
def snake_case ( files: list ) -> float:
    """Return the minimum total cost of merging all ``files``, two at a time."""
    optimal_merge_cost = 0
    while len(files) > 1:
        temp = 0
        # Consider the two files with minimum cost to be merged
        for _ in range(2):
            min_index = files.index(min(files))
            temp += files[min_index]
            files.pop(min_index)
        files.append(temp)
        optimal_merge_cost += temp
    return optimal_merge_cost
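# Worked example (illustrative): files [2, 3, 4]
#   merge 2 + 3 -> cost 5, files become [4, 5]
#   merge 4 + 5 -> cost 9, files become [9]
#   total optimal merge cost = 5 + 9 = 14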
if __name__ == "__main__":
import doctest
doctest.testmod()
| 712
|
'''simple docstring'''
import pytest
import datasets
# Import fixture modules as plugins
pytest_plugins = ["tests.fixtures.files", "tests.fixtures.hub", "tests.fixtures.fsspec"]


def pytest_collection_modifyitems(config, items):
    # Mark tests that carry neither an "integration" nor a "unit" marker as unit tests
    for item in items:
        if any(marker in item.keywords for marker in ["""integration""", """unit"""]):
            continue
        item.add_marker(pytest.mark.unit)


def pytest_configure(config):
    config.addinivalue_line("""markers""", """torchaudio_latest: mark test to run with torchaudio>=0.12""")
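# Illustrative use of the marker registered above (a hypothetical test, not in this file):
#
#     @pytest.mark.torchaudio_latest
#     def test_needs_recent_torchaudio():
#         ...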
@pytest.fixture(autouse=True)
def set_test_cache_config(tmp_path_factory, monkeypatch):
    # Redirect every cache path datasets would touch into the pytest base temp dir
    test_hf_cache_home = tmp_path_factory.getbasetemp() / """cache"""
    test_hf_datasets_cache = test_hf_cache_home / """datasets"""
    test_hf_metrics_cache = test_hf_cache_home / """metrics"""
    test_hf_modules_cache = test_hf_cache_home / """modules"""
    monkeypatch.setattr("""datasets.config.HF_DATASETS_CACHE""", str(test_hf_datasets_cache))
    monkeypatch.setattr("""datasets.config.HF_METRICS_CACHE""", str(test_hf_metrics_cache))
    monkeypatch.setattr("""datasets.config.HF_MODULES_CACHE""", str(test_hf_modules_cache))

    test_downloaded_datasets_path = test_hf_datasets_cache / """downloads"""
    monkeypatch.setattr("""datasets.config.DOWNLOADED_DATASETS_PATH""", str(test_downloaded_datasets_path))
    test_extracted_datasets_path = test_hf_datasets_cache / """downloads""" / """extracted"""
    monkeypatch.setattr("""datasets.config.EXTRACTED_DATASETS_PATH""", str(test_extracted_datasets_path))


@pytest.fixture(autouse=True, scope="""session""")
def disable_tqdm_output():
    datasets.disable_progress_bar()


@pytest.fixture(autouse=True)
def set_update_download_counts_to_false(monkeypatch):
    # don't take tests into account when counting download hits
    monkeypatch.setattr("""datasets.config.HF_UPDATE_DOWNLOAD_COUNTS""", False)


@pytest.fixture
def set_sqlalchemy_silence_uber_warning(monkeypatch):
    # Required to silence SQLAlchemy 2.0 deprecation warnings
    monkeypatch.setattr("""sqlalchemy.util.deprecations.SILENCE_UBER_WARNING""", True)
| 543
| 0
|
import argparse
import os
import gluonnlp as nlp
import mxnet as mx
import numpy as np
import torch
from gluonnlp.base import get_home_dir
from gluonnlp.model.bert import BERTEncoder
from gluonnlp.model.utils import _load_vocab
from gluonnlp.vocab import Vocab
from packaging import version
from torch import nn
from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
if version.parse(nlp.__version__) != version.parse('''0.8.3'''):
raise Exception('''requires gluonnlp == 0.8.3''')
if version.parse(mx.__version__) != version.parse('''1.5.0'''):
raise Exception('''requires mxnet == 1.5.0''')
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = '''The Nymphenburg Palace is a beautiful palace in Munich!'''
def convert_bort_checkpoint_to_pytorch(bort_checkpoint_path: str, pytorch_dump_folder_path: str) -> None:
    bort_4_8_768_1024_hparams = {
        """attention_cell""": """multi_head""",
        """num_layers""": 4,
        """units""": 1024,
        """hidden_size""": 768,
        """max_length""": 512,
        """num_heads""": 8,
        """scaled""": True,
        """dropout""": 0.1,
        """use_residual""": True,
        """embed_size""": 1024,
        """embed_dropout""": 0.1,
        """word_embed""": None,
        """layer_norm_eps""": 1E-5,
        """token_type_vocab_size""": 2,
    }

    predefined_args = bort_4_8_768_1024_hparams
# Let's construct the original Bort model here
# Taken from official BERT implementation, see:
# https://github.com/alexa/bort/blob/master/bort/bort.py
    encoder = BERTEncoder(
        attention_cell=predefined_args["""attention_cell"""],
        num_layers=predefined_args["""num_layers"""],
        units=predefined_args["""units"""],
        hidden_size=predefined_args["""hidden_size"""],
        max_length=predefined_args["""max_length"""],
        num_heads=predefined_args["""num_heads"""],
        scaled=predefined_args["""scaled"""],
        dropout=predefined_args["""dropout"""],
        output_attention=False,
        output_all_encodings=False,
        use_residual=predefined_args["""use_residual"""],
        activation=predefined_args.get("""activation""", """gelu"""),
        layer_norm_eps=predefined_args.get("""layer_norm_eps""", None),
    )

    # Vocab information needs to be fetched first
    # It's the same as RoBERTa, so RobertaTokenizer can be used later
    vocab_name = """openwebtext_ccnews_stories_books_cased"""

    # Specify download folder to Gluonnlp's vocab
    gluon_cache_dir = os.path.join(get_home_dir(), """models""")
    bort_vocab = _load_vocab(vocab_name, None, gluon_cache_dir, cls=Vocab)

    original_bort = nlp.model.BERTModel(
        encoder,
        len(bort_vocab),
        units=predefined_args["""units"""],
        embed_size=predefined_args["""embed_size"""],
        embed_dropout=predefined_args["""embed_dropout"""],
        word_embed=predefined_args["""word_embed"""],
        use_pooler=False,
        use_token_type_embed=False,
        token_type_vocab_size=predefined_args["""token_type_vocab_size"""],
        use_classifier=False,
        use_decoder=False,
    )

    original_bort.load_parameters(bort_checkpoint_path, cast_dtype=True, ignore_extra=True)
    params = original_bort._collect_params_with_prefix()

    # Build our config 🤗
    hf_bort_config_json = {
        """architectures""": ["""BertForMaskedLM"""],
        """attention_probs_dropout_prob""": predefined_args["""dropout"""],
        """hidden_act""": """gelu""",
        """hidden_dropout_prob""": predefined_args["""dropout"""],
        """hidden_size""": predefined_args["""embed_size"""],
        """initializer_range""": 0.02,
        """intermediate_size""": predefined_args["""hidden_size"""],
        """layer_norm_eps""": predefined_args["""layer_norm_eps"""],
        """max_position_embeddings""": predefined_args["""max_length"""],
        """model_type""": """bort""",
        """num_attention_heads""": predefined_args["""num_heads"""],
        """num_hidden_layers""": predefined_args["""num_layers"""],
        """pad_token_id""": 1,  # 2 = BERT, 1 = RoBERTa
        """type_vocab_size""": 1,  # 2 = BERT, 1 = RoBERTa
        """vocab_size""": len(bort_vocab),
    }

    hf_bort_config = BertConfig.from_dict(hf_bort_config_json)
    hf_bort_model = BertForMaskedLM(hf_bort_config)
    hf_bort_model.eval()
# Parameter mapping table (Gluonnlp to Transformers)
# * denotes layer index
#
# | Gluon Parameter | Transformers Parameter
# | -------------------------------------------------------------- | ----------------------
# | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias`
# | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight`
# | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight`
# | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight`
# | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight`
# | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight`
# | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias`
# | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight`
# Helper function to convert MXNET Arrays to PyTorch
    def to_torch(mx_array) -> nn.Parameter:
        return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy()))

    # Check param shapes and map new HF param back
    def check_and_map_params(hf_param, gluon_param):
        shape_hf = hf_param.shape

        gluon_param = to_torch(params[gluon_param])
        shape_gluon = gluon_param.shape

        assert (
            shape_hf == shape_gluon
        ), f"""The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers"""

        return gluon_param

    hf_bort_model.bert.embeddings.word_embeddings.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.word_embeddings.weight, """word_embed.0.weight""")
    hf_bort_model.bert.embeddings.position_embeddings.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.position_embeddings.weight, """encoder.position_weight""")
    hf_bort_model.bert.embeddings.LayerNorm.bias = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.bias, """encoder.layer_norm.beta""")
    hf_bort_model.bert.embeddings.LayerNorm.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.weight, """encoder.layer_norm.gamma""")

    # Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them)
    hf_bort_model.bert.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        hf_bort_model.bert.embeddings.token_type_embeddings.weight.data)
    for i in range(hf_bort_config.num_hidden_layers):
        layer: BertLayer = hf_bort_model.bert.encoder.layer[i]

        # self attention
        self_attn: BertSelfAttention = layer.attention.self
        self_attn.key.bias.data = check_and_map_params(
            self_attn.key.bias.data, f"""encoder.transformer_cells.{i}.attention_cell.proj_key.bias""")
        self_attn.key.weight.data = check_and_map_params(
            self_attn.key.weight.data, f"""encoder.transformer_cells.{i}.attention_cell.proj_key.weight""")
        self_attn.query.bias.data = check_and_map_params(
            self_attn.query.bias.data, f"""encoder.transformer_cells.{i}.attention_cell.proj_query.bias""")
        self_attn.query.weight.data = check_and_map_params(
            self_attn.query.weight.data, f"""encoder.transformer_cells.{i}.attention_cell.proj_query.weight""")
        self_attn.value.bias.data = check_and_map_params(
            self_attn.value.bias.data, f"""encoder.transformer_cells.{i}.attention_cell.proj_value.bias""")
        self_attn.value.weight.data = check_and_map_params(
            self_attn.value.weight.data, f"""encoder.transformer_cells.{i}.attention_cell.proj_value.weight""")

        # self attention output
        self_output: BertSelfOutput = layer.attention.output
        self_output.dense.bias = check_and_map_params(
            self_output.dense.bias, f"""encoder.transformer_cells.{i}.proj.bias""")
        self_output.dense.weight = check_and_map_params(
            self_output.dense.weight, f"""encoder.transformer_cells.{i}.proj.weight""")
        self_output.LayerNorm.bias = check_and_map_params(
            self_output.LayerNorm.bias, f"""encoder.transformer_cells.{i}.layer_norm.beta""")
        self_output.LayerNorm.weight = check_and_map_params(
            self_output.LayerNorm.weight, f"""encoder.transformer_cells.{i}.layer_norm.gamma""")

        # intermediate
        intermediate: BertIntermediate = layer.intermediate
        intermediate.dense.bias = check_and_map_params(
            intermediate.dense.bias, f"""encoder.transformer_cells.{i}.ffn.ffn_1.bias""")
        intermediate.dense.weight = check_and_map_params(
            intermediate.dense.weight, f"""encoder.transformer_cells.{i}.ffn.ffn_1.weight""")

        # output
        bert_output: BertOutput = layer.output
        bert_output.dense.bias = check_and_map_params(
            bert_output.dense.bias, f"""encoder.transformer_cells.{i}.ffn.ffn_2.bias""")
        bert_output.dense.weight = check_and_map_params(
            bert_output.dense.weight, f"""encoder.transformer_cells.{i}.ffn.ffn_2.weight""")
        bert_output.LayerNorm.bias = check_and_map_params(
            bert_output.LayerNorm.bias, f"""encoder.transformer_cells.{i}.ffn.layer_norm.beta""")
        bert_output.LayerNorm.weight = check_and_map_params(
            bert_output.LayerNorm.weight, f"""encoder.transformer_cells.{i}.ffn.layer_norm.gamma""")
    # Save space and energy 🎄
    hf_bort_model.half()

    # Compare output of both models
    tokenizer = RobertaTokenizer.from_pretrained("""roberta-base""")

    input_ids = tokenizer.encode_plus(SAMPLE_TEXT)["""input_ids"""]

    # Get gluon output
    gluon_input_ids = mx.nd.array([input_ids])
    output_gluon = original_bort(inputs=gluon_input_ids, token_types=[])

    # Get Transformer output (save and reload model again)
    hf_bort_model.save_pretrained(pytorch_dump_folder_path)
    hf_bort_model = BertModel.from_pretrained(pytorch_dump_folder_path)
    hf_bort_model.eval()

    input_ids = tokenizer.encode_plus(SAMPLE_TEXT, return_tensors="""pt""")
    output_hf = hf_bort_model(**input_ids)[0]

    gluon_layer = output_gluon[0].asnumpy()
    hf_layer = output_hf[0].detach().numpy()

    max_absolute_diff = np.max(np.abs(hf_layer - gluon_layer)).item()
    success = np.allclose(gluon_layer, hf_layer, atol=1E-3)

    if success:
        print("""✔️ Both models output the same tensors""")
    else:
        print("""❌ Both models do **NOT** output the same tensors""")
        print("""Absolute difference is:""", max_absolute_diff)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--bort_checkpoint_path''', default=None, type=str, required=True, help='''Path to the official Bort params file.'''
    )
    parser.add_argument(
        '''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
    )
    args = parser.parse_args()
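    # Example invocation (a sketch; script name and paths are placeholders):
    #   python convert_bort_original_gluonnlp_checkpoint_to_pytorch.py \
    #       --bort_checkpoint_path /path/to/bort.params \
    #       --pytorch_dump_folder_path ./bort-converted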
convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
| 354
|
def __lowerCAmelCase ( number: int ) -> bool:
    """Return True if ``number`` is a power of two (0 also passes this check)."""
    if number < 0:
        raise ValueError("""number must not be negative""")
    return number & (number - 1) == 0
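# Why the bit trick works (illustrative): a power of two has exactly one set bit,
# so subtracting 1 flips all lower bits and the AND clears everything:
#   8 -> 0b1000,  7 -> 0b0111,  8 & 7 == 0   (power of two)
#   6 -> 0b0110,  5 -> 0b0101,  6 & 5 == 4   (not a power of two)
# Note that 0 also satisfies the check, an accepted edge case of this implementation.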
if __name__ == "__main__":
import doctest
doctest.testmod()
| 354
| 1
|
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = '''▁'''

VOCAB_FILES_NAMES = {
    '''vocab_file''': '''vocab.json''',
    '''spm_file''': '''sentencepiece.bpe.model''',
}

PRETRAINED_VOCAB_FILES_MAP = {
    '''vocab_file''': {
        '''facebook/s2t-small-librispeech-asr''': (
            '''https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json'''
        ),
    },
    '''spm_file''': {
        '''facebook/s2t-small-librispeech-asr''': (
            '''https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model'''
        )
    },
}

MAX_MODEL_INPUT_SIZES = {
    '''facebook/s2t-small-librispeech-asr''': 1024,
}

MUSTC_LANGS = ['''pt''', '''fr''', '''ru''', '''nl''', '''ro''', '''it''', '''es''', '''de''']

LANGUAGES = {'''mustc''': MUSTC_LANGS}


class lowercase ( PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = MAX_MODEL_INPUT_SIZES
    model_input_names = ["""input_ids""", """attention_mask"""]
    prefix_tokens: List[int] = []
    def __init__(self, vocab_file, spm_file, bos_token="<s>", eos_token="</s>", pad_token="<pad>",
                 unk_token="<unk>", do_upper_case=False, do_lower_case=False, tgt_lang=None, lang_codes=None,
                 sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, pad_token=pad_token,
            do_upper_case=do_upper_case, do_lower_case=do_lower_case, tgt_lang=tgt_lang,
            lang_codes=lang_codes, sp_model_kwargs=self.sp_model_kwargs, **kwargs,
        )
        self.do_upper_case = do_upper_case
        self.do_lower_case = do_lower_case

        self.encoder = load_json(vocab_file)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.spm_file = spm_file
        self.sp_model = load_spm(spm_file, self.sp_model_kwargs)

        if lang_codes is not None:
            self.lang_codes = lang_codes
            self.langs = LANGUAGES[lang_codes]
            self.lang_tokens = [f"<lang:{lang}>" for lang in self.langs]
            self.lang_code_to_id = {lang: self.sp_model.PieceToId(f"<lang:{lang}>") for lang in self.langs}

            self._additional_special_tokens = self.lang_tokens
            self._tgt_lang = tgt_lang if tgt_lang is not None else self.langs[0]

            self.set_tgt_lang_special_tokens(self._tgt_lang)
        else:
            self.lang_code_to_id = {}

    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    @property
    def tgt_lang(self) -> str:
        return self._tgt_lang

    @tgt_lang.setter
    def tgt_lang(self, new_tgt_lang) -> None:
        self._tgt_lang = new_tgt_lang
        self.set_tgt_lang_special_tokens(new_tgt_lang)

    def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None:
        """Reset the prefix tokens to the language code of the given target language."""
        lang_code_id = self.lang_code_to_id[tgt_lang]
        self.prefix_tokens = [lang_code_id]

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder[self.unk_token])

    def _convert_id_to_token(self, index: int) -> str:
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens) -> str:
        """Converts a sequence of tokens (strings for sub-words) into a single string."""
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                decoded = self.sp_model.decode(current_sub_tokens)
                out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " "
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        decoded = self.sp_model.decode(current_sub_tokens)
        out_string += decoded.upper() if self.do_upper_case else decoded
        return out_string.strip()
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        """Build model inputs by prepending the language-code prefix tokens and appending eos."""
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + [self.eos_token_id]

    def get_special_tokens_mask(
        self, token_ids_0, token_ids_1=None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1]
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def get_vocab(self) -> Dict:
        vocab = self.encoder.copy()
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self) -> Dict:
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d: Dict) -> None:
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = load_spm(self.spm_file, self.sp_model_kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        save_dir = Path(save_directory)
        assert save_dir.is_dir(), f"{save_directory} should be a directory"
        vocab_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["vocab_file"]
        )
        spm_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["spm_file"]
        )

        save_json(self.encoder, vocab_save_path)

        if os.path.abspath(self.spm_file) != os.path.abspath(spm_save_path) and os.path.isfile(self.spm_file):
            copyfile(self.spm_file, spm_save_path)
        elif not os.path.isfile(self.spm_file):
            with open(spm_save_path, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (str(vocab_save_path), str(spm_save_path))


def load_spm(path: str, sp_model_kwargs: Dict[str, Any]) -> sentencepiece.SentencePieceProcessor:
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs)
    spm.Load(str(path))
    return spm


def load_json(path: str) -> Union[Dict, List]:
    with open(path, "r") as f:
        return json.load(f)


def save_json(data, path: str) -> None:
    with open(path, "w") as f:
        json.dump(data, f, indent=2)
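# Minimal usage sketch for the tokenizer above (file names and language settings
# below are assumptions for illustration, not files shipped with this module):
#
#     tokenizer = lowercase("vocab.json", "sentencepiece.bpe.model", lang_codes="mustc", tgt_lang="fr")
#     ids = tokenizer("C'est une phrase.").input_ids  # [<lang:fr> id, subword ids..., eos id]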
| 709
|
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import platform
import sys
os.environ["""TF_CPP_MIN_LOG_LEVEL"""] = '''3'''  # assumed target of this assignment: silence TensorFlow's C++ logging
print('''Python version:''', sys.version)
print('''OS platform:''', platform.platform())
print('''OS architecture:''', platform.machine())
try:
import torch
print('''Torch version:''', torch.__version__)
print('''Cuda available:''', torch.cuda.is_available())
print('''Cuda version:''', torch.version.cuda)
print('''CuDNN version:''', torch.backends.cudnn.version())
print('''Number of GPUs available:''', torch.cuda.device_count())
except ImportError:
print('''Torch version:''', None)
try:
import transformers
print('''transformers version:''', transformers.__version__)
except ImportError:
print('''transformers version:''', None)
| 647
| 0
|
"""simple docstring"""
from __future__ import annotations
from collections.abc import Callable
def trapezoidal_area(
    fnc: Callable[[float], float], x_start: float, x_end: float, steps: int = 1_0_0,
):
    '''Approximate the area under ``fnc`` between x_start and x_end with the trapezoidal rule.'''
    x1 = x_start
    fx1 = fnc(x_start)
    area = 0.0
    for _ in range(steps):
        # Approximates each small segment of the curve as linear and solves
        # for the trapezoidal area
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2)
        area += abs(fx2 + fx1) * (x2 - x1) / 2
        # Increment step
        x1 = x2
        fx1 = fx2
    return area
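# The loop above accumulates the classic trapezoid areas (illustrative):
#   area ~= sum over segments of (f(x1) + f(x2)) / 2 * (x2 - x1),
# where each segment has width (x_end - x_start) / steps.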
if __name__ == "__main__":
    def f(x):
        return x**3 + x**2

    print('f(x) = x^3 + x^2')
    print('The area between the curve, x = -5, x = 5 and the x axis is:')
    i = 10
    while i <= 10_0000:
        print(F'''with {i} steps: {trapezoidal_area(f, -5, 5, i)}''')
i *= 10
| 389
|
"""simple docstring"""
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class MaskFormerSwinModelTester:
    def __init__(self, parent, batch_size=13, image_size=32, patch_size=2, num_channels=3, embed_dim=16,
                 depths=[1, 2, 1], num_heads=[2, 2, 4], window_size=2, mlp_ratio=2.0, qkv_bias=True,
                 hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1,
                 hidden_act="gelu", use_absolute_embeddings=False, patch_norm=True, initializer_range=0.0_2,
                 layer_norm_eps=1E-5, is_training=True, scope=None, use_labels=True,
                 type_sequence_label_size=10, encoder_stride=8, out_features=["stage1", "stage2", "stage3"],
                 out_indices=[1, 2, 3]) -> None:
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
        self.out_features = out_features
        self.out_indices = out_indices
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return MaskFormerSwinConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            embed_dim=self.embed_dim,
            depths=self.depths,
            num_heads=self.num_heads,
            window_size=self.window_size,
            mlp_ratio=self.mlp_ratio,
            qkv_bias=self.qkv_bias,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            drop_path_rate=self.drop_path_rate,
            hidden_act=self.hidden_act,
            use_absolute_embeddings=self.use_absolute_embeddings,
            patch_norm=self.patch_norm,
            layer_norm_eps=self.layer_norm_eps,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
            out_features=self.out_features,
            out_indices=self.out_indices,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = MaskFormerSwinModel(config=config)
        model.to(torch_device)
        model.eval()

        result = model(pixel_values)

        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = MaskFormerSwinBackbone(config=config)
        model.to(torch_device)
        model.eval()

        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [13, 16, 16, 16])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, [16, 32, 64])

        # verify ValueError
        with self.parent.assertRaises(ValueError):
            config.out_features = ["""stem"""]
            model = MaskFormerSwinBackbone(config=config)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_torch
class MaskFormerSwinModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MaskFormerSwinModel,
            MaskFormerSwinBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = {'''feature-extraction''': MaskFormerSwinModel} if is_torch_available() else {}
    fx_compatible = False
    test_torchscript = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self) -> None:
        self.model_tester = MaskFormerSwinModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaskFormerSwinConfig, embed_dim=37)

    @require_torch_multi_gpu
    @unittest.skip(
        reason=(
            """`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn't work well with"""
            """ `nn.DataParallel`"""
        ) )
    def test_multi_gpu_data_parallel_forward(self):
        pass

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)
    @unittest.skip("""Swin does not use inputs_embeds""" )
    def test_inputs_embeds(self):
        pass

    @unittest.skip("""Swin does not support feedforward chunking""" )
    def test_feed_forward_chunking(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    @unittest.skip(reason="""MaskFormerSwin is only used as backbone and doesn't support output_attentions""" )
    def test_attention_outputs(self):
        pass

    @unittest.skip(reason="""MaskFormerSwin is only used as an internal backbone""" )
    def test_retain_grad_hidden_states_attentions(self):
        pass

    def check_hidden_states_output(self, inputs_dict, config, model_class, image_size):
        model = model_class(config)
        model.to(torch_device)
        model.eval()

        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

        hidden_states = outputs.hidden_states

        expected_num_layers = getattr(
            self.model_tester, """expected_num_hidden_layers""", len(self.model_tester.depths) + 1)
        self.assertEqual(len(hidden_states), expected_num_layers)

        # Swin has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])

        self.assertListEqual(
            list(hidden_states[0].shape[-2:]), [num_patches, self.model_tester.embed_dim], )
    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )

        for model_class in self.all_model_classes:
            inputs_dict["""output_hidden_states"""] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

    def test_hidden_states_output_with_padding(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])

        for model_class in self.all_model_classes:
            inputs_dict["""output_hidden_states"""] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

    @unittest.skip(reason="""MaskFormerSwin doesn't have pretrained checkpoints""" )
    def test_model_from_pretrained(self):
        pass

    @unittest.skip(reason="""This will be fixed once MaskFormerSwin is replaced by native Swin""" )
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason="""This will be fixed once MaskFormerSwin is replaced by native Swin""" )
    def test_save_load_fast_init_to_base(self):
        pass
    def test_model_outputs_equivalence(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def set_nan_tensor_to_zero(t):
            t[t != t] = 0
            return t

        def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}):
            with torch.no_grad():
                tuple_output = model(**tuple_inputs, return_dict=False, **additional_kwargs)
                dict_output = model(**dict_inputs, return_dict=True, **additional_kwargs).to_tuple()

                def recursive_check(tuple_object, dict_object):
                    if isinstance(tuple_object, (List, Tuple)):
                        for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object):
                            recursive_check(tuple_iterable_value, dict_iterable_value)
                    elif isinstance(tuple_object, Dict):
                        for tuple_iterable_value, dict_iterable_value in zip(
                            tuple_object.values(), dict_object.values()):
                            recursive_check(tuple_iterable_value, dict_iterable_value)
                    elif tuple_object is None:
                        return
                    else:
                        self.assertTrue(
                            torch.allclose(
                                set_nan_tensor_to_zero(tuple_object), set_nan_tensor_to_zero(dict_object), atol=1E-5), msg=(
                                """Tuple and dict output are not equal. Difference:"""
                                f""" {torch.max(torch.abs(tuple_object - dict_object))}. Tuple has `nan`:"""
                                f""" {torch.isnan(tuple_object).any()} and `inf`: {torch.isinf(tuple_object)}. Dict has"""
                                f""" `nan`: {torch.isnan(dict_object).any()} and `inf`: {torch.isinf(dict_object)}."""
                            ), )

                recursive_check(tuple_output, dict_output)

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs, {"""output_hidden_states""": True})

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs, {"""output_hidden_states""": True})
@require_torch
class MaskFormerSwinBackboneTest(unittest.TestCase, BackboneTesterMixin):
    all_model_classes = (MaskFormerSwinBackbone,) if is_torch_available() else ()
    config_class = MaskFormerSwinConfig

    def setUp(self) -> None:
        self.model_tester = MaskFormerSwinModelTester(self)

    def test_backbone_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        batch_size = inputs_dict["""pixel_values"""].shape[0]

        for backbone_class in self.all_model_classes:
            backbone = backbone_class(config)
            backbone.to(torch_device)
            backbone.eval()

            outputs = backbone(**inputs_dict)

            # Test default outputs and verify feature maps
            self.assertIsInstance(outputs.feature_maps, tuple)
            self.assertTrue(len(outputs.feature_maps) == len(backbone.channels))
            for feature_map, n_channels in zip(outputs.feature_maps, backbone.channels):
                self.assertTrue(feature_map.shape[:2], (batch_size, n_channels))
            self.assertIsNone(outputs.hidden_states)
            self.assertIsNone(outputs.attentions)

            # Test output_hidden_states=True
            outputs = backbone(**inputs_dict, output_hidden_states=True)
            self.assertIsNotNone(outputs.hidden_states)
            self.assertTrue(len(outputs.hidden_states), len(backbone.stage_names))
            # We skip the stem layer
            for hidden_states, n_channels in zip(outputs.hidden_states[1:], backbone.channels):
                for hidden_state in hidden_states:
                    # Hidden states are in the format (batch_size, (height * width), n_channels)
                    h_batch_size, _, h_n_channels = hidden_state.shape
                    self.assertTrue((h_batch_size, h_n_channels), (batch_size, n_channels))

            # Test output_attentions=True
            if self.has_attentions:
                outputs = backbone(**inputs_dict, output_attentions=True)
                self.assertIsNotNone(outputs.attentions)
| 389
| 1
|
def merge_sort(collection) -> list:
    """Sort a collection with merge sort (see the trace below the function)."""

    def merge(left, right) -> list:
        def _merge():
            while left and right:
                yield (left if left[0] <= right[0] else right).pop(0)
            yield from left
            yield from right

        return list(_merge())

    if len(collection) <= 1:
        return collection
    mid = len(collection) // 2
    return merge(merge_sort(collection[:mid]), merge_sort(collection[mid:]))
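# Illustrative trace: merge_sort([5, 3, 1]) splits into [5] and [3, 1],
# sorts the halves to [5] and [1, 3], then merges them into [1, 3, 5].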
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(*merge_sort(unsorted), sep=",")
| 715
|
g = 9.80_665


def archimedes_principle(fluid_density: float, volume: float, gravity: float = g) -> float:
    """Compute the buoyant force F = rho * g * V (see the worked example below)."""
    if fluid_density <= 0:
        raise ValueError('Impossible fluid density')
    if volume < 0:
        raise ValueError('Impossible Object volume')
    if gravity <= 0:
        raise ValueError('Impossible Gravity')
    return fluid_density * gravity * volume
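# Worked example (illustrative): fresh water (1000 kg/m^3) displacing 0.5 m^3
#   F = 1000 * 9.80665 * 0.5 ~= 4903.3 N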
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
| 259
| 0
|
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
    require_detectron2,
require_pytesseract,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
from transformers.image_utils import load_image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass

    def load_image(_):
        return None
# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
_lowerCAmelCase : int = (
"https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png"
)
@is_pipeline_test
@require_torch
@require_vision
class __snake_case ( unittest.TestCase ):
SCREAMING_SNAKE_CASE__ = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING
@require_pytesseract
@require_vision
    def get_test_pipeline(self, model, tokenizer, processor):
        dqa_pipeline = pipeline(
            'document-question-answering', model=model, tokenizer=tokenizer, image_processor=processor)

        image = INVOICE_URL
        word_boxes = list(zip(*apply_tesseract(load_image(image), None, '')))
        question = 'What is the placebo?'
        examples = [
            {
                'image': load_image(image),
                'question': question,
            },
            {
                'image': image,
                'question': question,
            },
            {
                'image': image,
                'question': question,
                'word_boxes': word_boxes,
            },
        ]
        return dqa_pipeline, examples

    def run_pipeline_test(self, dqa_pipeline, examples):
        outputs = dqa_pipeline(examples, top_k=2)
        self.assertEqual(
            outputs,
            [
                [
                    {'score': ANY(float), 'answer': ANY(str), 'start': ANY(int), 'end': ANY(int)},
                    {'score': ANY(float), 'answer': ANY(str), 'start': ANY(int), 'end': ANY(int)},
                ]
            ]
            * 3, )
@require_torch
    @require_detectron2
@require_pytesseract
    def test_small_model_pt(self):
        dqa_pipeline = pipeline('document-question-answering', model='hf-internal-testing/tiny-random-layoutlmv2')
        image = INVOICE_URL
        question = 'How many cats are there?'

        expected_output = [
            {'score': 0.0001, 'answer': 'oy 2312/2019', 'start': 38, 'end': 39},
            {'score': 0.0001, 'answer': 'oy 2312/2019 DUE', 'start': 38, 'end': 40},
        ]
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), expected_output)

        outputs = dqa_pipeline({'image': image, 'question': question}, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), expected_output)

        # This image does not detect ANY text in it, meaning layoutlmv2 should fail.
        # Empty answer probably
        image = './tests/fixtures/tests_samples/COCO/000000039769.png'
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(outputs, [])

        # We can optionally pass the words and bounding boxes directly
        image = './tests/fixtures/tests_samples/COCO/000000039769.png'
        words = []
        boxes = []
        outputs = dqa_pipeline(image=image, question=question, words=words, boxes=boxes, top_k=2)
        self.assertEqual(outputs, [])
@slow
@require_torch
    @require_detectron2
@require_pytesseract
    def test_large_model_pt(self):
        dqa_pipeline = pipeline(
            'document-question-answering', model='tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa', revision='9977165', )
        image = INVOICE_URL
        question = 'What is the invoice number?'

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
                {'score': 0.9944, 'answer': 'us-001', 'start': 16, 'end': 16},
                {'score': 0.0009, 'answer': 'us-001', 'start': 16, 'end': 16},
            ], )

        outputs = dqa_pipeline({'image': image, 'question': question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
                {'score': 0.9944, 'answer': 'us-001', 'start': 16, 'end': 16},
                {'score': 0.0009, 'answer': 'us-001', 'start': 16, 'end': 16},
            ], )

        outputs = dqa_pipeline(
            [{'image': image, 'question': question}, {'image': image, 'question': question}], top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
                [
                    {'score': 0.9944, 'answer': 'us-001', 'start': 16, 'end': 16},
                    {'score': 0.0009, 'answer': 'us-001', 'start': 16, 'end': 16},
                ],
            ]
            * 2, )
@slow
@require_torch
    @require_detectron2
@require_pytesseract
    def test_large_model_pt_chunk(self):
        dqa_pipeline = pipeline(
            'document-question-answering', model='tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa', revision='9977165', max_seq_len=50, )
        image = INVOICE_URL
        question = 'What is the invoice number?'

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
                {'score': 0.9974, 'answer': '1110212019', 'start': 23, 'end': 23},
                {'score': 0.9948, 'answer': 'us-001', 'start': 16, 'end': 16},
            ], )

        outputs = dqa_pipeline({'image': image, 'question': question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
                {'score': 0.9974, 'answer': '1110212019', 'start': 23, 'end': 23},
                {'score': 0.9948, 'answer': 'us-001', 'start': 16, 'end': 16},
            ], )

        outputs = dqa_pipeline(
            [{'image': image, 'question': question}, {'image': image, 'question': question}], top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
                [
                    {'score': 0.9974, 'answer': '1110212019', 'start': 23, 'end': 23},
                    {'score': 0.9948, 'answer': 'us-001', 'start': 16, 'end': 16},
                ]
            ]
            * 2, )
@slow
@require_torch
@require_pytesseract
@require_vision
    def test_large_model_pt_layoutlm(self):
        tokenizer = AutoTokenizer.from_pretrained(
            'impira/layoutlm-document-qa', revision='3dc6de3', add_prefix_space=True)
        dqa_pipeline = pipeline(
            'document-question-answering', model='impira/layoutlm-document-qa', tokenizer=tokenizer, revision='3dc6de3', )
        image = INVOICE_URL
        question = 'What is the invoice number?'

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
                {'score': 0.4251, 'answer': 'us-001', 'start': 16, 'end': 16},
                {'score': 0.0819, 'answer': '1110212019', 'start': 23, 'end': 23},
            ], )

        outputs = dqa_pipeline({'image': image, 'question': question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
                {'score': 0.4251, 'answer': 'us-001', 'start': 16, 'end': 16},
                {'score': 0.0819, 'answer': '1110212019', 'start': 23, 'end': 23},
            ], )

        outputs = dqa_pipeline(
            [{'image': image, 'question': question}, {'image': image, 'question': question}], top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
                [
                    {'score': 0.4251, 'answer': 'us-001', 'start': 16, 'end': 16},
                    {'score': 0.0819, 'answer': '1110212019', 'start': 23, 'end': 23},
                ]
            ]
            * 2, )

        word_boxes = list(zip(*apply_tesseract(load_image(image), None, '')))

        # This model should also work if `image` is set to None
        outputs = dqa_pipeline({'image': None, 'word_boxes': word_boxes, 'question': question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
                {'score': 0.4251, 'answer': 'us-001', 'start': 16, 'end': 16},
                {'score': 0.0819, 'answer': '1110212019', 'start': 23, 'end': 23},
            ], )
@slow
@require_torch
@require_pytesseract
@require_vision
    def test_large_model_pt_layoutlm_chunk(self):
        tokenizer = AutoTokenizer.from_pretrained(
            'impira/layoutlm-document-qa', revision='3dc6de3', add_prefix_space=True)
        dqa_pipeline = pipeline(
            'document-question-answering', model='impira/layoutlm-document-qa', tokenizer=tokenizer, revision='3dc6de3', max_seq_len=50, )
        image = INVOICE_URL
        question = 'What is the invoice number?'

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
                {'score': 0.9999, 'answer': 'us-001', 'start': 16, 'end': 16},
                {'score': 0.9998, 'answer': 'us-001', 'start': 16, 'end': 16},
            ], )

        outputs = dqa_pipeline(
            [{'image': image, 'question': question}, {'image': image, 'question': question}], top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
                [
                    {'score': 0.9999, 'answer': 'us-001', 'start': 16, 'end': 16},
                    {'score': 0.9998, 'answer': 'us-001', 'start': 16, 'end': 16},
                ]
            ]
            * 2, )

        word_boxes = list(zip(*apply_tesseract(load_image(image), None, '')))

        # This model should also work if `image` is set to None
        outputs = dqa_pipeline({'image': None, 'word_boxes': word_boxes, 'question': question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
                {'score': 0.9999, 'answer': 'us-001', 'start': 16, 'end': 16},
                {'score': 0.9998, 'answer': 'us-001', 'start': 16, 'end': 16},
            ], )
@slow
@require_torch
    def test_large_model_pt_donut(self):
        dqa_pipeline = pipeline(
            'document-question-answering', model='naver-clova-ix/donut-base-finetuned-docvqa', tokenizer=AutoTokenizer.from_pretrained('naver-clova-ix/donut-base-finetuned-docvqa'), feature_extractor='naver-clova-ix/donut-base-finetuned-docvqa', )

        image = INVOICE_URL
        question = 'What is the invoice number?'
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), [{'answer': 'us-001'}])

    @require_tf
    @unittest.skip('Document question answering not implemented in TF')
    def test_small_model_tf(self):
        pass
| 193
|
from __future__ import annotations
import json
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
_lowerCAmelCase : List[str] = {"UserAgent": UserAgent().random}
def UpperCAmelCase_ ( snake_case__ ) -> dict:
"""simple docstring"""
lowerCAmelCase__ = script.contents[0]
lowerCAmelCase__ = json.loads(data[data.find('{"config"' ) : -1] )
return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]
class InstagramUser:
    def __init__(self, username):
        self.url = f"https://www.instagram.com/{username}/"
        self.user_data = self.get_json()

    def get_json(self) -> dict:
        """Fetch the profile page and return the user's info as a dict."""
        html = requests.get(self.url, headers=headers).text
        scripts = BeautifulSoup(html, "html.parser").find_all("script")
        try:
            return extract_user_profile(scripts[4])
        except (json.decoder.JSONDecodeError, KeyError):
            return extract_user_profile(scripts[3])

    def __repr__(self):
        return f"{self.__class__.__name__}('{self.username}')"

    def __str__(self):
        return f"{self.fullname} ({self.username}) is {self.biography}"
    @property
    def username(self) -> str:
        return self.user_data["username"]

    @property
    def fullname(self) -> str:
        return self.user_data["full_name"]

    @property
    def biography(self) -> str:
        return self.user_data["biography"]

    @property
    def email(self) -> str:
        return self.user_data["business_email"]

    @property
    def website(self) -> str:
        return self.user_data["external_url"]

    @property
    def number_of_followers(self) -> int:
        return self.user_data["edge_followed_by"]["count"]

    @property
    def number_of_followings(self) -> int:
        return self.user_data["edge_follow"]["count"]

    @property
    def number_of_posts(self) -> int:
        return self.user_data["edge_owner_to_timeline_media"]["count"]

    @property
    def profile_picture_url(self) -> str:
        return self.user_data["profile_pic_url_hd"]

    @property
    def is_verified(self) -> bool:
        return self.user_data["is_verified"]

    @property
    def is_private(self) -> bool:
        return self.user_data["is_private"]
def UpperCAmelCase_ ( snake_case__ = "github" ) -> None:
"""simple docstring"""
import os
if os.environ.get('CI' ):
return # test failing on GitHub Actions
lowerCAmelCase__ = InstagramUser(snake_case__ )
assert instagram_user.user_data
assert isinstance(instagram_user.user_data , snake_case__ )
assert instagram_user.username == username
if username != "github":
return
assert instagram_user.fullname == "GitHub"
assert instagram_user.biography == "Built for developers."
assert instagram_user.number_of_posts > 150
assert instagram_user.number_of_followers > 120000
assert instagram_user.number_of_followings > 15
assert instagram_user.email == "support@github.com"
assert instagram_user.website == "https://github.com/readme"
assert instagram_user.profile_picture_url.startswith('https://instagram.' )
assert instagram_user.is_verified is True
assert instagram_user.is_private is False
if __name__ == "__main__":
import doctest
doctest.testmod()
    instagram_user = InstagramUser("github")
print(instagram_user)
print(f"""{instagram_user.number_of_posts = }""")
print(f"""{instagram_user.number_of_followers = }""")
print(f"""{instagram_user.number_of_followings = }""")
print(f"""{instagram_user.email = }""")
print(f"""{instagram_user.website = }""")
print(f"""{instagram_user.profile_picture_url = }""")
print(f"""{instagram_user.is_verified = }""")
print(f"""{instagram_user.is_private = }""")
| 193
| 1
|
from dataclasses import dataclass
from typing import Optional
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .modeling_utils import ModelMixin
@dataclass
class TransformerTemporalModelOutput(BaseOutput):
    sample: torch.FloatTensor


class TransformerTemporalModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(self, num_attention_heads: int = 16, attention_head_dim: int = 88, in_channels: Optional[int] = None, out_channels: Optional[int] = None, num_layers: int = 1, dropout: float = 0.0, norm_num_groups: int = 32, cross_attention_dim: Optional[int] = None, attention_bias: bool = False, sample_size: Optional[int] = None, activation_fn: str = "geglu", norm_elementwise_affine: bool = True, double_self_attention: bool = True):
        super().__init__()
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        inner_dim = num_attention_heads * attention_head_dim

        self.in_channels = in_channels

        self.norm = torch.nn.GroupNorm(num_groups=norm_num_groups, num_channels=in_channels, eps=1e-6, affine=True)
        self.proj_in = nn.Linear(in_channels, inner_dim)

        # 3. Define transformers blocks
        self.transformer_blocks = nn.ModuleList(
            [
                BasicTransformerBlock(
                    inner_dim,
                    num_attention_heads,
                    attention_head_dim,
                    dropout=dropout,
                    cross_attention_dim=cross_attention_dim,
                    activation_fn=activation_fn,
                    attention_bias=attention_bias,
                    double_self_attention=double_self_attention,
                    norm_elementwise_affine=norm_elementwise_affine,
                )
                for d in range(num_layers)
            ]
        )

        self.proj_out = nn.Linear(inner_dim, in_channels)

    def forward(self, hidden_states, encoder_hidden_states=None, timestep=None, class_labels=None, num_frames=1, cross_attention_kwargs=None, return_dict: bool = True):
        # 1. Input: fold the frame axis out of the batch axis
        batch_frames, channel, height, width = hidden_states.shape
        batch_size = batch_frames // num_frames

        residual = hidden_states

        hidden_states = hidden_states[None, :].reshape(batch_size, num_frames, channel, height, width)
        hidden_states = hidden_states.permute(0, 2, 1, 3, 4)

        hidden_states = self.norm(hidden_states)
        hidden_states = hidden_states.permute(0, 3, 4, 2, 1).reshape(batch_size * height * width, num_frames, channel)

        hidden_states = self.proj_in(hidden_states)

        # 2. Blocks
        for block in self.transformer_blocks:
            hidden_states = block(
                hidden_states,
                encoder_hidden_states=encoder_hidden_states,
                timestep=timestep,
                cross_attention_kwargs=cross_attention_kwargs,
                class_labels=class_labels,
            )

        # 3. Output: restore the original (batch * frames, C, H, W) layout
        hidden_states = self.proj_out(hidden_states)
        hidden_states = (
            hidden_states[None, None, :]
            .reshape(batch_size, height, width, num_frames, channel)
            .permute(0, 3, 4, 1, 2)
            .contiguous()
        )
        hidden_states = hidden_states.reshape(batch_frames, channel, height, width)

        output = hidden_states + residual

        if not return_dict:
            return (output,)

        return TransformerTemporalModelOutput(sample=output)
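# A minimal, hedged usage sketch of the temporal transformer above. The shapes
# follow the forward() contract ((batch * frames, channels, height, width));
# the concrete sizes are illustrative assumptions, not values from the source.
#
#   model = TransformerTemporalModel(num_attention_heads=8, attention_head_dim=32, in_channels=32, norm_num_groups=8)
#   frames = torch.randn(2 * 4, 32, 16, 16)  # batch=2, num_frames=4
#   out = model(frames, num_frames=4).sample
#   assert out.shape == frames.shape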
| 704
|
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class ClapFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_features", "is_longer"]

    def __init__(self, feature_size=64, sampling_rate=48_000, hop_length=480, max_length_s=10, fft_window_size=1_024, padding_value=0.0, return_attention_mask=False, frequency_min: float = 0, frequency_max: float = 14_000, top_db: int = None, truncation: str = "fusion", padding: str = "repeatpad", **kwargs):
        super().__init__(
            feature_size=feature_size,
            sampling_rate=sampling_rate,
            padding_value=padding_value,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )
        self.top_db = top_db
        self.truncation = truncation
        self.padding = padding
        self.fft_window_size = fft_window_size
        self.nb_frequency_bins = (fft_window_size >> 1) + 1
        self.hop_length = hop_length
        self.max_length_s = max_length_s
        self.nb_max_samples = max_length_s * sampling_rate
        self.sampling_rate = sampling_rate
        self.frequency_min = frequency_min
        self.frequency_max = frequency_max
        self.mel_filters = mel_filter_bank(num_frequency_bins=self.nb_frequency_bins, num_mel_filters=feature_size, min_frequency=frequency_min, max_frequency=frequency_max, sampling_rate=sampling_rate, norm=None, mel_scale="htk")
        self.mel_filters_slaney = mel_filter_bank(num_frequency_bins=self.nb_frequency_bins, num_mel_filters=feature_size, min_frequency=frequency_min, max_frequency=frequency_max, sampling_rate=sampling_rate, norm="slaney", mel_scale="slaney")

    def to_dict(self) -> Dict[str, Any]:
        output = copy.deepcopy(self.__dict__)
        output["feature_extractor_type"] = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        if "mel_filters_slaney" in output:
            del output["mel_filters_slaney"]
        return output

    def _np_extract_fbank_features(self, waveform, mel_filters=None) -> np.ndarray:
        log_mel_spectrogram = spectrogram(waveform, window_function(self.fft_window_size, "hann"), frame_length=self.fft_window_size, hop_length=self.hop_length, power=2.0, mel_filters=mel_filters, log_mel="dB")
        return log_mel_spectrogram.T

    def _random_mel_fusion(self, mel, total_frames, chunk_frames):
        ranges = np.array_split(list(range(0, total_frames - chunk_frames + 1)), 3)
        if len(ranges[1]) == 0:
            # if the audio is too short, we just use the first chunk
            ranges[1] = [0]
        if len(ranges[2]) == 0:
            # if the audio is too short, we just use the first chunk
            ranges[2] = [0]
        # randomly choose index for each part
        idx_front = np.random.choice(ranges[0])
        idx_middle = np.random.choice(ranges[1])
        idx_back = np.random.choice(ranges[2])

        mel_chunk_front = mel[idx_front : idx_front + chunk_frames, :]
        mel_chunk_middle = mel[idx_middle : idx_middle + chunk_frames, :]
        mel_chunk_back = mel[idx_back : idx_back + chunk_frames, :]

        mel = torch.tensor(mel[None, None, :])
        mel_shrink = torch.nn.functional.interpolate(mel, size=[chunk_frames, 64], mode="bilinear", align_corners=False)
        mel_shrink = mel_shrink[0][0].numpy()
        mel_fusion = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back], axis=0)
        return mel_fusion

    def _get_input_mel(self, waveform, max_length, truncation, padding) -> np.array:
        if waveform.shape[0] > max_length:
            if truncation == "rand_trunc":
                longer = True
                # random crop to max_length (for compatibility) -> this should be handled by self.pad
                overflow = len(waveform) - max_length
                idx = np.random.randint(0, overflow + 1)
                waveform = waveform[idx : idx + max_length]
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters_slaney)[None, :]
            elif truncation == "fusion":
                mel = self._np_extract_fbank_features(waveform, self.mel_filters)
                chunk_frames = max_length // self.hop_length + 1  # the +1 related to how the spectrogram is computed
                total_frames = mel.shape[0]
                if chunk_frames == total_frames:
                    # there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
                    # In this case, we just use the whole audio.
                    input_mel = np.stack([mel, mel, mel, mel], axis=0)
                    longer = False
                else:
                    input_mel = self._random_mel_fusion(mel, total_frames, chunk_frames)
                    longer = True
            else:
                raise NotImplementedError(f"data_truncating {truncation} not implemented")
        else:
            longer = False
            # only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
            if waveform.shape[0] < max_length:
                if padding == "repeat":
                    n_repeat = int(max_length / len(waveform))
                    waveform = np.stack(np.tile(waveform, n_repeat + 1))[:max_length]
                if padding == "repeatpad":
                    n_repeat = int(max_length / len(waveform))
                    waveform = np.stack(np.tile(waveform, n_repeat))
                waveform = np.pad(waveform, (0, max_length - waveform.shape[0]), mode="constant", constant_values=0)

            if truncation == "fusion":
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters)
                input_mel = np.stack([input_mel, input_mel, input_mel, input_mel], axis=0)
            else:
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters_slaney)[None, :]

        return input_mel, longer

    def __call__(self, raw_speech, truncation: str = None, padding: Optional[str] = None, max_length: Optional[int] = None, sampling_rate: Optional[int] = None, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs) -> BatchFeature:
        truncation = truncation if truncation is not None else self.truncation
        padding = padding if padding else self.padding

        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"
                    f" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"
                    f" was sampled with {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float64) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float64)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float64)

        # always return batch
        if not is_batched:
            raw_speech = [np.asarray(raw_speech)]

        # convert to mel spectrogram, truncate and pad if needed.
        padded_inputs = [
            self._get_input_mel(waveform, max_length if max_length else self.nb_max_samples, truncation, padding)
            for waveform in raw_speech
        ]

        input_mel = []
        is_longer = []
        for mel, longer in padded_inputs:
            input_mel.append(mel)
            is_longer.append(longer)

        if truncation == "fusion" and sum(is_longer) == 0:
            # if no audio is longer than 10s, then randomly select one audio to be longer
            rand_idx = np.random.randint(0, len(input_mel))
            is_longer[rand_idx] = True

        if isinstance(input_mel[0], list):
            input_mel = [np.asarray(feature, dtype=np.float64) for feature in input_mel]

        # is_longer is a list of bool
        is_longer = [[longer] for longer in is_longer]

        input_features = {"input_features": input_mel, "is_longer": is_longer}
        input_features = BatchFeature(input_features)

        if return_tensors is not None:
            input_features = input_features.convert_to_tensors(return_tensors)

        return input_features
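# Hedged usage sketch for the feature extractor above: feed a mono waveform and
# inspect the fused mel input. The one-second random signal is an illustrative
# assumption, not data from the source.
#
#   fe = ClapFeatureExtractor()
#   waveform = np.random.randn(48_000)  # 1 s of audio at the default sampling rate
#   features = fe(waveform, sampling_rate=48_000, return_tensors="np")
#   print(features["input_features"].shape, features["is_longer"])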
| 307
| 0
|
"""simple docstring"""
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
logger = logging.get_logger(__name__)

arg_to_scheduler = {
'''linear''': get_linear_schedule_with_warmup,
'''cosine''': get_cosine_schedule_with_warmup,
'''cosine_w_restarts''': get_cosine_with_hard_restarts_schedule_with_warmup,
'''polynomial''': get_polynomial_decay_schedule_with_warmup,
'''constant''': get_constant_schedule,
'''constant_w_warmup''': get_constant_schedule_with_warmup,
}
class Seq2SeqTrainer(Trainer):
    def __init__(self, config=None, data_args=None, *args, **kwargs):
        super().__init__(*args, **kwargs)

        if config is None:
            assert isinstance(self.model, PreTrainedModel), (
                "If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
                f" {self.model.__class__}"
            )
            self.config = self.model.config
        else:
            self.config = config

        self.data_args = data_args
        self.vocab_size = self.config.tgt_vocab_size if isinstance(self.config, FSMTConfig) else self.config.vocab_size

        if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
            assert self.config.pad_token_id is not None, (
                "Make sure that `config.pad_token_id` is correctly defined when ignoring `pad_token` for loss"
                " calculation or doing label smoothing."
            )

        if self.config.pad_token_id is None and self.config.eos_token_id is not None:
            logger.warning(
                f"The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for"
                " padding.."
            )

        if self.args.label_smoothing == 0:
            self.loss_fn = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id)
        else:
            # dynamically import label_smoothed_nll_loss
            from utils import label_smoothed_nll_loss

            self.loss_fn = label_smoothed_nll_loss

    def create_optimizer_and_scheduler(self, num_training_steps: int):
        if self.optimizer is None:
            no_decay = ["bias", "LayerNorm.weight"]
            optimizer_grouped_parameters = [
                {
                    "params": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay)],
                    "weight_decay": self.args.weight_decay,
                },
                {
                    "params": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay)],
                    "weight_decay": 0.0,
                },
            ]
            optimizer_cls = Adafactor if self.args.adafactor else AdamW
            if self.args.adafactor:
                optimizer_cls = Adafactor
                optimizer_kwargs = {"scale_parameter": False, "relative_step": False}
            else:
                optimizer_cls = AdamW
                optimizer_kwargs = {
                    "betas": (self.args.adam_beta1, self.args.adam_beta2),
                    "eps": self.args.adam_epsilon,
                }
            optimizer_kwargs["lr"] = self.args.learning_rate
            if self.sharded_ddp:
                self.optimizer = OSS(params=optimizer_grouped_parameters, optim=optimizer_cls, **optimizer_kwargs)
            else:
                self.optimizer = optimizer_cls(optimizer_grouped_parameters, **optimizer_kwargs)

        if self.lr_scheduler is None:
            self.lr_scheduler = self._get_lr_scheduler(num_training_steps)
        else:  # ignoring --lr_scheduler
            logger.warning("scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.")

    def _get_lr_scheduler(self, num_training_steps):
        schedule_func = arg_to_scheduler[self.args.lr_scheduler]
        if self.args.lr_scheduler == "constant":
            scheduler = schedule_func(self.optimizer)
        elif self.args.lr_scheduler == "constant_w_warmup":
            scheduler = schedule_func(self.optimizer, num_warmup_steps=self.args.warmup_steps)
        else:
            scheduler = schedule_func(
                self.optimizer, num_warmup_steps=self.args.warmup_steps, num_training_steps=num_training_steps
            )
        return scheduler

    def _get_train_sampler(self):
        if isinstance(self.train_dataset, torch.utils.data.IterableDataset):
            return None
        elif is_torch_tpu_available():
            return get_tpu_sampler(self.train_dataset)
        else:
            if self.args.sortish_sampler:
                self.train_dataset.make_sortish_sampler(
                    self.args.per_device_train_batch_size,
                    distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED),
                )

            return (
                RandomSampler(self.train_dataset)
                if self.args.local_rank == -1
                else DistributedSampler(self.train_dataset)
            )

    def _compute_loss(self, model, inputs, labels):
        if self.args.label_smoothing == 0:
            if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
                # force training to ignore pad token
                logits = model(**inputs, use_cache=False)[0]
                loss = self.loss_fn(logits.view(-1, logits.shape[-1]), labels.view(-1))
            else:
                # compute usual loss via models
                loss, logits = model(**inputs, labels=labels, use_cache=False)[:2]
        else:
            # compute label smoothed loss
            logits = model(**inputs, use_cache=False)[0]
            lprobs = torch.nn.functional.log_softmax(logits, dim=-1)
            loss, _ = self.loss_fn(lprobs, labels, self.args.label_smoothing, ignore_index=self.config.pad_token_id)
        return loss, logits

    def compute_loss(self, model, inputs):
        labels = inputs.pop("labels")
        loss, _ = self._compute_loss(model, inputs, labels)
        return loss

    def prediction_step(self, model, inputs, prediction_loss_only, ignore_keys=None):
        inputs = self._prepare_inputs(inputs)

        gen_kwargs = {
            "max_length": self.data_args.val_max_target_length
            if self.data_args is not None
            else self.config.max_length,
            "num_beams": self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
        }

        if self.args.predict_with_generate and not self.args.prediction_loss_only:
            generated_tokens = self.model.generate(
                inputs["input_ids"], attention_mask=inputs["attention_mask"], **gen_kwargs
            )
            # in case the batch is shorter than max length, the output should be padded
            if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
                generated_tokens = self._pad_tensors_to_max_len(generated_tokens, gen_kwargs["max_length"])

        labels = inputs.pop("labels")
        with torch.no_grad():
            # compute loss on predict data
            loss, logits = self._compute_loss(model, inputs, labels)

        loss = loss.mean().detach()
        if self.args.prediction_loss_only:
            return (loss, None, None)

        logits = generated_tokens if self.args.predict_with_generate else logits

        if labels.shape[-1] < gen_kwargs["max_length"]:
            labels = self._pad_tensors_to_max_len(labels, gen_kwargs["max_length"])

        return (loss, logits, labels)

    def _pad_tensors_to_max_len(self, tensor, max_length):
        pad_token_id = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id
        if pad_token_id is None:
            raise ValueError(
                "Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be"
                f" padded to `max_length`={max_length}"
            )
        padded_tensor = pad_token_id * torch.ones(
            (tensor.shape[0], max_length), dtype=tensor.dtype, device=tensor.device
        )
        padded_tensor[:, : tensor.shape[-1]] = tensor
        return padded_tensor
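# Hedged construction sketch (the model, TrainingArguments, dataset, and
# collator names below are illustrative assumptions, not values from the source):
#
#   trainer = Seq2SeqTrainer(
#       model=model,
#       args=training_args,          # transformers.TrainingArguments
#       data_args=data_args,         # exposes val_max_target_length, eval_beams, ...
#       train_dataset=train_dataset,
#       data_collator=collator,
#   )
#   trainer.train()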
| 572
|
"""simple docstring"""
def least_divisible_repunit(divisor: int) -> int:
    """Return the length of the smallest repunit divisible by `divisor` (0 if none exists)."""
    if divisor % 5 == 0 or divisor % 2 == 0:
        return 0
    repunit = 1
    repunit_index = 1
    while repunit:
        repunit = (10 * repunit + 1) % divisor
        repunit_index += 1
    return repunit_index


def solution(limit: int = 1_000_000) -> int:
    """Return the smallest odd divisor whose least divisible repunit exceeds `limit`."""
    divisor = limit - 1
    if divisor % 2 == 0:
        divisor += 1
    while least_divisible_repunit(divisor) <= limit:
        divisor += 2
    return divisor
if __name__ == "__main__":
print(F'{solution() = }')
| 572
| 1
|
"""simple docstring"""
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class Summarization(TaskTemplate):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="summarization", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({"summary": Value("string")})
    text_column: str = "text"
    summary_column: str = "summary"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.text_column: "text", self.summary_column: "summary"}
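# Hedged usage sketch: `column_mapping` tells a dataset how to rename its
# columns for this task. The column names below are illustrative assumptions.
#
#   task = Summarization(text_column="article", summary_column="highlights")
#   print(task.column_mapping)  # {'article': 'text', 'highlights': 'summary'}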
| 505
|
"""simple docstring"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging
logger = logging.get_logger(__name__)

VAN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Visual-Attention-Network/van-base": (
        "https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json"
    ),
}


class VanConfig(PretrainedConfig):
    model_type = "van"

    def __init__(self, image_size=224, num_channels=3, patch_sizes=[7, 3, 3, 3], strides=[4, 2, 2, 2], hidden_sizes=[64, 128, 320, 512], depths=[3, 3, 12, 3], mlp_ratios=[8, 8, 4, 4], hidden_act="gelu", initializer_range=0.02, layer_norm_eps=1e-6, layer_scale_init_value=1e-2, drop_path_rate=0.0, dropout_rate=0.0, **kwargs):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.mlp_ratios = mlp_ratios
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.dropout_rate = dropout_rate
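# Hedged usage sketch: configs are plain containers, so round-tripping one is a
# quick sanity check. The directory name and sizes below are illustrative.
#
#   config = VanConfig(hidden_sizes=[32, 64, 160, 256])
#   config.save_pretrained("./van-tiny")               # writes config.json
#   reloaded = VanConfig.from_pretrained("./van-tiny")
#   assert reloaded.hidden_sizes == [32, 64, 160, 256]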
| 505
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/timesformer": "https://huggingface.co/facebook/timesformer/resolve/main/config.json",
}


class TimesformerConfig(PretrainedConfig):
    model_type = "timesformer"

    def __init__(self, image_size=224, patch_size=16, num_channels=3, num_frames=8, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-6, qkv_bias=True, attention_type="divided_space_time", drop_path_rate=0, **kwargs):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.num_frames = num_frames
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.qkv_bias = qkv_bias
        self.attention_type = attention_type
        self.drop_path_rate = drop_path_rate
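# Hedged usage sketch: a variant can be described by overriding the defaults
# above; the override values are illustrative assumptions only.
#
#   config = TimesformerConfig(num_frames=16, drop_path_rate=0.1)
#   print(config.num_frames, config.attention_type)  # 16 divided_space_time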
| 63
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_mluke"] = ["MLukeTokenizer"]


if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mluke import MLukeTokenizer

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
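# Hedged note on the lazy-import pattern above: at runtime the module object is
# swapped for a _LazyModule, so the tokenizer is only imported on first access,
# e.g. (assuming sentencepiece is installed):
#
#   from transformers.models.mluke import MLukeTokenizer  # triggers the real import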
| 99
| 0
|
from __future__ import annotations
import time
Path = list[tuple[int, int]]

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right


class Node:
    def __init__(self, pos_x, pos_y, goal_x, goal_y, parent):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.parent = parent


class BreadthFirstSearch:
    def __init__(self, start, goal):
        self.start = Node(start[1], start[0], goal[1], goal[0], None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], None)

        self.node_queue = [self.start]
        self.reached = False

    def search(self) -> Path | None:
        while self.node_queue:
            current_node = self.node_queue.pop(0)

            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)

            successors = self.get_successors(current_node)

            for node in successors:
                self.node_queue.append(node)

        if not self.reached:
            return [self.start.pos]
        return None

    def get_successors(self, parent) -> list[Node]:
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue

            if grid[pos_y][pos_x] != 0:
                continue

            successors.append(
                Node(pos_x, pos_y, self.target.pos_y, self.target.pos_x, parent)
            )
        return successors

    def retrace_path(self, node) -> Path:
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path


class BidirectionalBreadthFirstSearch:
    def __init__(self, start, goal):
        self.fwd_bfs = BreadthFirstSearch(start, goal)
        self.bwd_bfs = BreadthFirstSearch(goal, start)
        self.reached = False

    def search(self) -> Path | None:
        while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue:
            current_fwd_node = self.fwd_bfs.node_queue.pop(0)
            current_bwd_node = self.bwd_bfs.node_queue.pop(0)

            if current_bwd_node.pos == current_fwd_node.pos:
                self.reached = True
                return self.retrace_bidirectional_path(current_fwd_node, current_bwd_node)

            self.fwd_bfs.target = current_bwd_node
            self.bwd_bfs.target = current_fwd_node

            successors = {
                self.fwd_bfs: self.fwd_bfs.get_successors(current_fwd_node),
                self.bwd_bfs: self.bwd_bfs.get_successors(current_bwd_node),
            }

            for bfs in [self.fwd_bfs, self.bwd_bfs]:
                for node in successors[bfs]:
                    bfs.node_queue.append(node)

        if not self.reached:
            return [self.fwd_bfs.start.pos]
        return None

    def retrace_bidirectional_path(self, fwd_node, bwd_node) -> Path:
        fwd_path = self.fwd_bfs.retrace_path(fwd_node)
        bwd_path = self.bwd_bfs.retrace_path(bwd_node)
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path


if __name__ == "__main__":
    # all coordinates are given in format [y,x]
    import doctest

    doctest.testmod()

    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    start_bfs_time = time.time()
    bfs = BreadthFirstSearch(init, goal)
    path = bfs.search()
    bfs_time = time.time() - start_bfs_time

    print("Unidirectional BFS computation time : ", bfs_time)

    start_bd_bfs_time = time.time()
    bd_bfs = BidirectionalBreadthFirstSearch(init, goal)
    bd_path = bd_bfs.search()
    bd_bfs_time = time.time() - start_bd_bfs_time

    print("Bidirectional BFS computation time : ", bd_bfs_time)
| 709
|
def is_sum_subset(arr: list[int], required_sum: int) -> bool:
    """Return True if some subset of `arr` sums to `required_sum` (classic DP table)."""
    arr_len = len(arr)
    subset = [[False] * (required_sum + 1) for _ in range(arr_len + 1)]

    # for each arr value, a sum of zero(0) can be formed by not taking any element
    # hence True/1
    for i in range(arr_len + 1):
        subset[i][0] = True

    # sum is not zero and set is empty then false
    for i in range(1, required_sum + 1):
        subset[0][i] = False

    for i in range(1, arr_len + 1):
        for j in range(1, required_sum + 1):
            if arr[i - 1] > j:
                subset[i][j] = subset[i - 1][j]
            if arr[i - 1] <= j:
                subset[i][j] = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]

    return subset[arr_len][required_sum]
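# Hedged usage sketch for the DP above (inputs are illustrative):
#
#   assert is_sum_subset([3, 34, 4, 12, 5, 2], 9)        # 4 + 5 = 9
#   assert not is_sum_subset([3, 34, 4, 12, 5, 2], 30)   # no subset sums to 30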
if __name__ == "__main__":
import doctest
doctest.testmod()
| 638
| 0
|
from math import factorial
DIGIT_FACTORIAL = {str(d): factorial(d) for d in range(10)}


def sum_of_digit_factorial(n: int) -> int:
    return sum(DIGIT_FACTORIAL[d] for d in str(n))


def solution() -> int:
    # 7 * 9! + 1 is an upper bound: no number with more digits can equal its digit-factorial sum
    limit = 7 * factorial(9) + 1
    return sum(i for i in range(3, limit) if sum_of_digit_factorial(i) == i)
if __name__ == "__main__":
print(F'''{solution() = }''')
| 576
|
import argparse
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import (
RobertaTokenizer,
TrOCRConfig,
TrOCRForCausalLM,
TrOCRProcessor,
VisionEncoderDecoderModel,
ViTConfig,
ViTImageProcessor,
ViTModel,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def create_rename_keys(encoder_config, decoder_config):
    rename_keys = []
for i in range(encoder_config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F"""encoder.deit.blocks.{i}.norm1.weight""", F"""encoder.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""encoder.deit.blocks.{i}.norm1.bias""", F"""encoder.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append(
(F"""encoder.deit.blocks.{i}.attn.proj.weight""", F"""encoder.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append(
(F"""encoder.deit.blocks.{i}.attn.proj.bias""", F"""encoder.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append(
(F"""encoder.deit.blocks.{i}.norm2.weight""", F"""encoder.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""encoder.deit.blocks.{i}.norm2.bias""", F"""encoder.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append(
(F"""encoder.deit.blocks.{i}.mlp.fc1.weight""", F"""encoder.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append(
(F"""encoder.deit.blocks.{i}.mlp.fc1.bias""", F"""encoder.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append(
(F"""encoder.deit.blocks.{i}.mlp.fc2.weight""", F"""encoder.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""encoder.deit.blocks.{i}.mlp.fc2.bias""", F"""encoder.encoder.layer.{i}.output.dense.bias""") )
# cls token, position embeddings and patch embeddings of encoder
rename_keys.extend(
[
('encoder.deit.cls_token', 'encoder.embeddings.cls_token'),
('encoder.deit.pos_embed', 'encoder.embeddings.position_embeddings'),
('encoder.deit.patch_embed.proj.weight', 'encoder.embeddings.patch_embeddings.projection.weight'),
('encoder.deit.patch_embed.proj.bias', 'encoder.embeddings.patch_embeddings.projection.bias'),
('encoder.deit.norm.weight', 'encoder.layernorm.weight'),
('encoder.deit.norm.bias', 'encoder.layernorm.bias'),
] )
return rename_keys
def read_in_q_k_v(state_dict, encoder_config):
    for i in range(encoder_config.num_hidden_layers):
        # queries, keys and values (only weights, no biases)
        in_proj_weight = state_dict.pop(f"encoder.deit.blocks.{i}.attn.qkv.weight")

        state_dict[f"encoder.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : encoder_config.hidden_size, :
        ]
        state_dict[f"encoder.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            encoder_config.hidden_size : encoder_config.hidden_size * 2, :
        ]
        state_dict[f"encoder.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -encoder_config.hidden_size :, :
        ]


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def prepare_img(checkpoint_url):
    if "handwritten" in checkpoint_url:
        url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg"  # industry
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg" #
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122.jpg"
    elif "printed" in checkpoint_url or "stage1" in checkpoint_url:
        url = "https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg"
    im = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    return im
@torch.no_grad()
def convert_tr_ocr_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    encoder_config = ViTConfig(image_size=384, qkv_bias=False)
    decoder_config = TrOCRConfig()

    # size of the architecture
    if "base" in checkpoint_url:
        decoder_config.encoder_hidden_size = 768
    elif "large" in checkpoint_url:
        # use ViT-large encoder
        encoder_config.hidden_size = 1024
        encoder_config.intermediate_size = 4096
        encoder_config.num_hidden_layers = 24
        encoder_config.num_attention_heads = 16
        decoder_config.encoder_hidden_size = 1024
    else:
        raise ValueError("Should either find 'base' or 'large' in checkpoint URL")

    # the large-printed + stage1 checkpoints uses sinusoidal position embeddings, no layernorm afterwards
    if "large-printed" in checkpoint_url or "stage1" in checkpoint_url:
        decoder_config.tie_word_embeddings = False
        decoder_config.activation_function = "relu"
        decoder_config.max_position_embeddings = 1024
        decoder_config.scale_embedding = True
        decoder_config.use_learned_position_embeddings = False
        decoder_config.layernorm_embedding = False

    # load HuggingFace model
    encoder = ViTModel(encoder_config, add_pooling_layer=False)
    decoder = TrOCRForCausalLM(decoder_config)
    model = VisionEncoderDecoderModel(encoder=encoder, decoder=decoder)
    model.eval()

    # load state_dict of original model, rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu", check_hash=True)["model"]

    rename_keys = create_rename_keys(encoder_config, decoder_config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, encoder_config)

    # remove parameters we don't need
    del state_dict["encoder.deit.head.weight"]
    del state_dict["encoder.deit.head.bias"]
    del state_dict["decoder.version"]

    # add prefix to decoder keys
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key)
        if key.startswith("decoder") and "output_projection" not in key:
            state_dict["decoder.model." + key] = val
        else:
            state_dict[key] = val

    # load state dict
    model.load_state_dict(state_dict)

    # Check outputs on an image
    image_processor = ViTImageProcessor(size=encoder_config.image_size)
    tokenizer = RobertaTokenizer.from_pretrained("roberta-large")
    processor = TrOCRProcessor(image_processor, tokenizer)

    pixel_values = processor(images=prepare_img(checkpoint_url), return_tensors="pt").pixel_values

    # verify logits
    decoder_input_ids = torch.tensor([[model.config.decoder.decoder_start_token_id]])
    outputs = model(pixel_values=pixel_values, decoder_input_ids=decoder_input_ids)
    logits = outputs.logits

    expected_shape = torch.Size([1, 1, 50265])
    if "trocr-base-handwritten" in checkpoint_url:
        expected_slice = torch.tensor(
            [-1.4502, -4.6683, -0.5347, -2.9291, 9.1435, -3.0571, 8.9764, 1.7560, 8.7358, -1.5311]
        )
    elif "trocr-large-handwritten" in checkpoint_url:
        expected_slice = torch.tensor(
            [-2.6437, -1.3129, -2.2596, -5.3455, 6.3539, 1.7604, 5.4991, 1.4702, 5.6113, 2.0170]
        )
    elif "trocr-base-printed" in checkpoint_url:
        expected_slice = torch.tensor(
            [-5.6816, -5.8388, 1.1398, -6.9034, 6.8505, -2.4393, 1.2284, -1.0232, -1.9661, -3.9210]
        )
    elif "trocr-large-printed" in checkpoint_url:
        expected_slice = torch.tensor(
            [-6.0162, -7.0959, 4.4155, -5.1063, 7.0468, -3.1631, 2.6466, -0.3081, -0.8106, -1.7535]
        )

    if "stage1" not in checkpoint_url:
        assert logits.shape == expected_shape, "Shape of logits not as expected"
        assert torch.allclose(logits[0, 0, :10], expected_slice, atol=1e-3), "First elements of logits not as expected"

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving processor to {pytorch_dump_folder_path}")
    processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_url''',
default='''https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt''',
type=str,
help='''URL to the original PyTorch checkpoint (.pth file).''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
    args = parser.parse_args()
convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 576
| 1
|
def solution(n: int = 1000) -> int:
    """Return the sum of all multiples of 3 or 5 below n."""
    return sum(e for e in range(3, n) if e % 3 == 0 or e % 5 == 0)
if __name__ == "__main__":
print(f"""{solution() = }""")
| 510
|
import math
def real_power(apparent_power: float, power_factor: float) -> float:
    """Real (active) power P = S * pf."""
    if (
        not isinstance(power_factor, (int, float))
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError("power_factor must be a valid float value between -1 and 1.")
    return apparent_power * power_factor


def reactive_power(apparent_power: float, power_factor: float) -> float:
    """Reactive power Q = S * sqrt(1 - pf^2)."""
    if (
        not isinstance(power_factor, (int, float))
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError("power_factor must be a valid float value between -1 and 1.")
    return apparent_power * math.sqrt(1 - power_factor**2)
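# Hedged worked example: a 100 VA load at power factor 0.9 splits into
# P = S * pf and Q = S * sqrt(1 - pf^2):
#
#   print(real_power(100, 0.9))      # 90.0
#   print(reactive_power(100, 0.9))  # ~43.59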
if __name__ == "__main__":
import doctest
doctest.testmod()
| 510
| 1
|
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class EfficientFormerImageProcessorTester(unittest.TestCase):
    def __init__(self, parent, batch_size=13, num_channels=3, image_size=224, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5]):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
        }


@require_torch
@require_vision
class EfficientFormerImageProcessorTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ViTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_proc_tester = EfficientFormerImageProcessorTester(self)

    @property
    def image_processor_dict(self):
        return self.image_proc_tester.prepare_image_processor_dict()

    def test_image_proc_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "image_mean"))
        self.assertTrue(hasattr(image_processor, "image_std"))
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_resize"))
        self.assertTrue(hasattr(image_processor, "size"))

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )
| 0
|
import os
import unittest
from transformers import MobileBertTokenizer, MobileBertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class lowerCamelCase_ ( lowerCamelCase , unittest.TestCase ):
a__ = MobileBertTokenizer
a__ = MobileBertTokenizerFast
a__ = True
a__ = True
a__ = filter_non_english
a__ = '''google/mobilebert-uncased'''
def A ( self ):
"""simple docstring"""
super().setUp()
__magic_name__ :Tuple = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
__magic_name__ :Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
__magic_name__ :List[str] = [
(tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2]) # else the 'google/' prefix is stripped
for tokenizer_def in self.tokenizers_list
]
def A ( self , __lowerCAmelCase ):
"""simple docstring"""
__magic_name__ :Union[str, Any] = '''UNwant\u00E9d,running'''
__magic_name__ :int = '''unwanted, running'''
return input_text, output_text
def A ( self ):
"""simple docstring"""
__magic_name__ :Optional[int] = self.tokenizer_class(self.vocab_file )
__magic_name__ :List[Any] = tokenizer.tokenize('''UNwant\u00E9d,running''' )
self.assertListEqual(__lowerCAmelCase , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowerCAmelCase ) , [9, 6, 7, 1_2, 1_0, 1_1] )
def A ( self ):
"""simple docstring"""
if not self.test_rust_tokenizer:
return
__magic_name__ :int = self.get_tokenizer()
__magic_name__ :Tuple = self.get_rust_tokenizer()
__magic_name__ :List[str] = '''UNwant\u00E9d,running'''
__magic_name__ :Optional[Any] = tokenizer.tokenize(__lowerCAmelCase )
__magic_name__ :List[Any] = rust_tokenizer.tokenize(__lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
__magic_name__ :int = tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
__magic_name__ :str = rust_tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
__magic_name__ :List[Any] = self.get_rust_tokenizer()
__magic_name__ :Any = tokenizer.encode(__lowerCAmelCase )
__magic_name__ :Any = rust_tokenizer.encode(__lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
# With lower casing
__magic_name__ :Any = self.get_tokenizer(do_lower_case=__lowerCAmelCase )
__magic_name__ :List[Any] = self.get_rust_tokenizer(do_lower_case=__lowerCAmelCase )
__magic_name__ :Dict = '''UNwant\u00E9d,running'''
__magic_name__ :Tuple = tokenizer.tokenize(__lowerCAmelCase )
__magic_name__ :Union[str, Any] = rust_tokenizer.tokenize(__lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
__magic_name__ :Optional[Any] = tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
__magic_name__ :Dict = rust_tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
__magic_name__ :Tuple = self.get_rust_tokenizer()
__magic_name__ :Dict = tokenizer.encode(__lowerCAmelCase )
__magic_name__ :List[Any] = rust_tokenizer.encode(__lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
def A ( self ):
"""simple docstring"""
__magic_name__ :Optional[int] = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize('''ah\u535A\u63A8zz''' ) , ['''ah''', '''\u535A''', '''\u63A8''', '''zz'''] )
def A ( self ):
"""simple docstring"""
__magic_name__ :List[Any] = BasicTokenizer(do_lower_case=__lowerCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def A ( self ):
"""simple docstring"""
__magic_name__ :Union[str, Any] = BasicTokenizer(do_lower_case=__lowerCAmelCase , strip_accents=__lowerCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hällo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''h\u00E9llo'''] )
def A ( self ):
"""simple docstring"""
__magic_name__ :Dict = BasicTokenizer(do_lower_case=__lowerCAmelCase , strip_accents=__lowerCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def A ( self ):
"""simple docstring"""
__magic_name__ :Optional[int] = BasicTokenizer(do_lower_case=__lowerCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def A ( self ):
"""simple docstring"""
__magic_name__ :List[str] = BasicTokenizer(do_lower_case=__lowerCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def A ( self ):
"""simple docstring"""
__magic_name__ :int = BasicTokenizer(do_lower_case=__lowerCAmelCase , strip_accents=__lowerCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HäLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def A ( self ):
"""simple docstring"""
__magic_name__ :Optional[int] = BasicTokenizer(do_lower_case=__lowerCAmelCase , strip_accents=__lowerCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HaLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def A ( self ):
"""simple docstring"""
__magic_name__ :Optional[Any] = BasicTokenizer(do_lower_case=__lowerCAmelCase , never_split=['''[UNK]'''] )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? [UNK]''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?''', '''[UNK]'''] )
def A ( self ):
"""simple docstring"""
__magic_name__ :int = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''']
__magic_name__ :Union[str, Any] = {}
for i, token in enumerate(__lowerCAmelCase ):
__magic_name__ :Tuple = i
__magic_name__ :List[Any] = WordpieceTokenizer(vocab=__lowerCAmelCase , unk_token='''[UNK]''' )
self.assertListEqual(tokenizer.tokenize('''''' ) , [] )
self.assertListEqual(tokenizer.tokenize('''unwanted running''' ) , ['''un''', '''##want''', '''##ed''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.tokenize('''unwantedX running''' ) , ['''[UNK]''', '''runn''', '''##ing'''] )
def A ( self ):
"""simple docstring"""
self.assertTrue(_is_whitespace(''' ''' ) )
self.assertTrue(_is_whitespace('''\t''' ) )
self.assertTrue(_is_whitespace('''\r''' ) )
self.assertTrue(_is_whitespace('''\n''' ) )
self.assertTrue(_is_whitespace('''\u00A0''' ) )
self.assertFalse(_is_whitespace('''A''' ) )
self.assertFalse(_is_whitespace('''-''' ) )
def A ( self ):
"""simple docstring"""
self.assertTrue(_is_control('''\u0005''' ) )
self.assertFalse(_is_control('''A''' ) )
self.assertFalse(_is_control(''' ''' ) )
self.assertFalse(_is_control('''\t''' ) )
self.assertFalse(_is_control('''\r''' ) )
def A ( self ):
"""simple docstring"""
self.assertTrue(_is_punctuation('''-''' ) )
self.assertTrue(_is_punctuation('''$''' ) )
self.assertTrue(_is_punctuation('''`''' ) )
self.assertTrue(_is_punctuation('''.''' ) )
self.assertFalse(_is_punctuation('''A''' ) )
self.assertFalse(_is_punctuation(''' ''' ) )
def A ( self ):
"""simple docstring"""
__magic_name__ :Any = self.get_tokenizer()
__magic_name__ :Any = self.get_rust_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(__lowerCAmelCase ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] )
self.assertListEqual(
[rust_tokenizer.tokenize(__lowerCAmelCase ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] )
    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("google/mobilebert-uncased")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [101] + text + [102]
        assert encoded_pair == [101] + text + [102] + text_2 + [102]
    def test_offsets_with_special_characters(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                sentence = f"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
                tokens = tokenizer_r.encode_plus(
                    sentence,
                    return_attention_mask=False,
                    return_token_type_ids=False,
                    return_offsets_mapping=True,
                    add_special_tokens=True,
                )

                do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r, "do_lower_case") else False
                expected_results = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), '''A'''),
((1, 2), ''','''),
((3, 5), '''na'''),
((5, 6), '''##ï'''),
((6, 8), '''##ve'''),
((9, 1_5), tokenizer_r.mask_token),
((1_6, 2_1), '''Allen'''),
((2_1, 2_3), '''##NL'''),
((2_3, 2_4), '''##P'''),
((2_5, 3_3), '''sentence'''),
((3_3, 3_4), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), '''a'''),
((1, 2), ''','''),
((3, 8), '''naive'''),
((9, 1_5), tokenizer_r.mask_token),
((1_6, 2_1), '''allen'''),
((2_1, 2_3), '''##nl'''),
((2_3, 2_4), '''##p'''),
((2_5, 3_3), '''sentence'''),
((3_3, 3_4), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['''input_ids'''] ) )
self.assertEqual([e[0] for e in expected_results] , tokens['''offset_mapping'''] )
    def test_change_tokenize_chinese_chars(self):
        list_of_commun_chinese_char = ["的", "人", "有"]
        text_with_chinese_char = "".join(list_of_commun_chinese_char)
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                kwargs["tokenize_chinese_chars"] = True
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(tokens_without_spe_char_p, list_of_commun_chinese_char)
                self.assertListEqual(tokens_without_spe_char_r, list_of_commun_chinese_char)

                kwargs["tokenize_chinese_chars"] = False
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that only the first Chinese character is not preceded by "##".
                expected_tokens = [
                    f"##{token}" if idx != 0 else token for idx, token in enumerate(list_of_commun_chinese_char)
                ]
                self.assertListEqual(tokens_without_spe_char_p, expected_tokens)
                self.assertListEqual(tokens_without_spe_char_r, expected_tokens)
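# --- Illustration (not part of the test file above) ---
# The behaviour exercised by test_wordpiece_tokenizer is greedy
# longest-match-first WordPiece. A minimal sketch of that algorithm, assuming
# only a plain set/dict vocab; `greedy_wordpiece` is a hypothetical helper name.
def greedy_wordpiece(word, vocab, unk_token="[UNK]"):
    tokens = []
    start = 0
    while start < len(word):
        end = len(word)
        cur_substr = None
        while start < end:
            substr = word[start:end]
            if start > 0:
                substr = "##" + substr  # continuation pieces carry the "##" prefix
            if substr in vocab:
                cur_substr = substr
                break
            end -= 1
        if cur_substr is None:
            return [unk_token]  # no piece matches: the whole word degrades to [UNK]
        tokens.append(cur_substr)
        start = end
    return tokens


# greedy_wordpiece("unwanted", {"un", "##want", "##ed"}) == ["un", "##want", "##ed"]
# greedy_wordpiece("unwantedX", {"un", "##want", "##ed"}) == ["[UNK]"]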
| 0
| 1
|
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging
logger = logging.get_logger(__name__)


class MultiControlNetModel(ModelMixin):
    r"""Wrapper that runs several `ControlNetModel`s and sums their residuals."""

    def __init__(self, controlnets: Union[List[ControlNetModel], Tuple[ControlNetModel]]):
        super().__init__()
        self.nets = nn.ModuleList(controlnets)

    def forward(
        self,
        sample: torch.FloatTensor,
        timestep: Union[torch.Tensor, float, int],
        encoder_hidden_states: torch.Tensor,
        controlnet_cond: List[torch.Tensor],
        conditioning_scale: List[float],
        class_labels: Optional[torch.Tensor] = None,
        timestep_cond: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        cross_attention_kwargs: Optional[Dict[str, Any]] = None,
        guess_mode: bool = False,
        return_dict: bool = True,
    ) -> Union[ControlNetOutput, Tuple]:
        for i, (image, scale, controlnet) in enumerate(zip(controlnet_cond, conditioning_scale, self.nets)):
            down_samples, mid_sample = controlnet(
                sample,
                timestep,
                encoder_hidden_states,
                image,
                scale,
                class_labels,
                timestep_cond,
                attention_mask,
                cross_attention_kwargs,
                guess_mode,
                return_dict,
            )

            # merge samples
            if i == 0:
                down_block_res_samples, mid_block_res_sample = down_samples, mid_sample
            else:
                down_block_res_samples = [
                    samples_prev + samples_curr
                    for samples_prev, samples_curr in zip(down_block_res_samples, down_samples)
                ]
                mid_block_res_sample += mid_sample

        return down_block_res_samples, mid_block_res_sample

    def save_pretrained(
        self,
        save_directory: Union[str, os.PathLike],
        is_main_process: bool = True,
        save_function: Callable = None,
        safe_serialization: bool = False,
        variant: Optional[str] = None,
    ):
        idx = 0
        model_path_to_save = save_directory
        for controlnet in self.nets:
            controlnet.save_pretrained(
                model_path_to_save,
                is_main_process=is_main_process,
                save_function=save_function,
                safe_serialization=safe_serialization,
                variant=variant,
            )

            idx += 1
            model_path_to_save = model_path_to_save + f"_{idx}"

    @classmethod
    def from_pretrained(cls, pretrained_model_path: Optional[Union[str, os.PathLike]], **kwargs):
        idx = 0
        controlnets = []

        # load controlnet and append to list until no controlnet directory exists anymore
        # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_pretrained`
        # second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
        model_path_to_load = pretrained_model_path
        while os.path.isdir(model_path_to_load):
            controlnet = ControlNetModel.from_pretrained(model_path_to_load, **kwargs)
            controlnets.append(controlnet)

            idx += 1
            model_path_to_load = pretrained_model_path + f"_{idx}"

        logger.info(f"{len(controlnets)} controlnets loaded from {pretrained_model_path}.")

        if len(controlnets) == 0:
            raise ValueError(
                f"No ControlNets found under {os.path.dirname(pretrained_model_path)}. Expected at least {pretrained_model_path + '_0'}."
            )

        return cls(controlnets)
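# --- Usage sketch (illustrative only; checkpoint names assumed) ---
# Two ControlNets driven with per-net conditioning scales; the residuals from
# each net are summed inside forward() as implemented above.
#
#   canny = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny")
#   pose = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-openpose")
#   multi = MultiControlNetModel([canny, pose])
#   down_res, mid_res = multi(
#       sample, timestep, encoder_hidden_states,
#       controlnet_cond=[canny_image, pose_image], conditioning_scale=[1.0, 0.5],
#   )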
| 703
|
import os
from datetime import datetime as dt
from github import Github
LABELS_TO_EXEMPT = [
"good first issue",
"good second issue",
"good difficult issue",
"enhancement",
"new pipeline/model",
"new scheduler",
"wip",
]
def main():
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/diffusers")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted(issue.get_comments(), key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Closes the issue after 7 days of inactivity since the Stalebot notification.
issue.edit(state="closed" )
elif (
"stale" in issue.get_labels()
and last_comment is not None
and last_comment.user.login != "github-actions[bot]"
):
# Opens the issue if someone other than Stalebot commented.
issue.edit(state="open" )
issue.remove_from_labels("stale" )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Post a Stalebot notification after 23 days of inactivity.
issue.create_comment(
"This issue has been automatically marked as stale because it has not had "
"recent activity. If you think this still needs to be addressed "
"please comment on this thread.\n\nPlease note that issues that do not follow the "
"[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) "
"are likely to be ignored." )
issue.add_to_labels("stale" )
if __name__ == "__main__":
main()
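# --- Worked example of the inactivity windows above (illustrative) ---
#   from datetime import datetime, timedelta
#   updated_at = datetime.utcnow() - timedelta(days=24)
#   (datetime.utcnow() - updated_at).days > 23  # True -> eligible for the stale notice
#   (datetime.utcnow() - updated_at).days > 7   # True -> eligible for closing, once the
#                                               # last comment is the Stalebot notification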
| 5
| 0
|
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
logger = logging.get_logger(__name__)

LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'microsoft/layoutlmv3-base': 'https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json',
}
class LayoutLMv3Config(PretrainedConfig):
    model_type = "layoutlmv3"

    def __init__(
        self,
        vocab_size=50_265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        max_2d_position_embeddings=1_024,
        coordinate_size=128,
        shape_size=128,
        has_relative_attention_bias=True,
        rel_pos_bins=32,
        max_rel_pos=128,
        rel_2d_pos_bins=64,
        max_rel_2d_pos=256,
        has_spatial_attention_bias=True,
        text_embed=True,
        visual_embed=True,
        input_size=224,
        num_channels=3,
        patch_size=16,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            vocab_size=vocab_size,
            hidden_size=hidden_size,
            num_hidden_layers=num_hidden_layers,
            num_attention_heads=num_attention_heads,
            intermediate_size=intermediate_size,
            hidden_act=hidden_act,
            hidden_dropout_prob=hidden_dropout_prob,
            attention_probs_dropout_prob=attention_probs_dropout_prob,
            max_position_embeddings=max_position_embeddings,
            type_vocab_size=type_vocab_size,
            initializer_range=initializer_range,
            layer_norm_eps=layer_norm_eps,
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
        self.max_2d_position_embeddings = max_2d_position_embeddings
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.has_relative_attention_bias = has_relative_attention_bias
        self.rel_pos_bins = rel_pos_bins
        self.max_rel_pos = max_rel_pos
        self.has_spatial_attention_bias = has_spatial_attention_bias
        self.rel_2d_pos_bins = rel_2d_pos_bins
        self.max_rel_2d_pos = max_rel_2d_pos
        self.text_embed = text_embed
        self.visual_embed = visual_embed
        self.input_size = input_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.classifier_dropout = classifier_dropout
class LayoutLMv3OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.12")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
if self.task in ["question-answering", "sequence-classification"]:
return OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''sequence'''}),
('''bbox''', {0: '''batch''', 1: '''sequence'''}),
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
else:
return OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''sequence'''}),
('''bbox''', {0: '''batch''', 1: '''sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''sequence'''}),
('''pixel_values''', {0: '''batch''', 1: '''num_channels'''}),
] )
    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12

    def generate_dummy_inputs(
        self,
        processor: "ProcessorMixin",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
    ) -> Mapping[str, Any]:
        setattr(processor.image_processor, "apply_ocr", False)

        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = processor.tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )
        # Generate dummy inputs according to compute batch and sequence
        dummy_text = [[" ".join([processor.tokenizer.unk_token]) * seq_length]] * batch_size

        # Generate dummy bounding boxes
        dummy_bboxes = [[[48, 84, 73, 128]]] * batch_size

        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        # batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
        dummy_image = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
        inputs = dict(
            processor(
                dummy_image,
                text=dummy_text,
                boxes=dummy_bboxes,
                return_tensors=framework,
            )
        )

        return inputs
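# --- Usage sketch (illustrative; exact processor/export APIs vary by version) ---
#   processor = LayoutLMv3Processor.from_pretrained("microsoft/layoutlmv3-base")
#   onnx_config = LayoutLMv3OnnxConfig(LayoutLMv3Config())
#   dummy = onnx_config.generate_dummy_inputs(processor, batch_size=-1, seq_length=-1)
#   sorted(dummy.keys())  # expected: attention_mask, bbox, input_ids, pixel_values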
| 522
|
"""simple docstring"""
from __future__ import annotations
END = "#"


class Trie:
    def __init__(self) -> None:
        self._trie: dict = {}

    def insert_word(self, text: str) -> None:
        trie = self._trie
        for char in text:
            if char not in trie:
                trie[char] = {}
            trie = trie[char]
        trie[END] = True

    def find_word(self, prefix: str) -> tuple | list:
        trie = self._trie
        for char in prefix:
            if char in trie:
                trie = trie[char]
            else:
                return []
        return self._elements(trie)

    def _elements(self, d: dict) -> tuple:
        result = []
        for c, v in d.items():
            sub_result = [" "] if c == END else [(c + s) for s in self._elements(v)]
            result.extend(sub_result)
        return tuple(result)


trie = Trie()
words = ("depart", "detergent", "daring", "dog", "deer", "deal")
for word in words:
    trie.insert_word(word)


def autocomplete_using_trie(string: str) -> tuple:
    suffixes = trie.find_word(string)
    return tuple(string + word for word in suffixes)


def main() -> None:
    print(autocomplete_using_trie("de"))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
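# --- Expected behaviour, for reference ---
# Each terminal END marker contributes a trailing space, so for the word list above:
#   autocomplete_using_trie("de") == ("depart ", "detergent ", "deer ", "deal ")
# Lookup walks O(len(prefix)) nodes to find the subtree, then visits every
# character of the matching suffixes.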
| 522
| 1
|
"""simple docstring"""
import tensorflow as tf
from ...tf_utils import shape_list
class TFAdaptiveSoftmaxMask(tf.keras.layers.Layer):
    def __init__(self, vocab_size, d_embed, d_proj, cutoffs, div_val=1, keep_order=False, **kwargs):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.d_embed = d_embed
        self.d_proj = d_proj

        self.cutoffs = cutoffs + [vocab_size]
        self.cutoff_ends = [0] + self.cutoffs
        self.div_val = div_val

        self.shortlist_size = self.cutoffs[0]
        self.n_clusters = len(self.cutoffs) - 1
        self.head_size = self.shortlist_size + self.n_clusters
        self.keep_order = keep_order

        self.out_layers = []
        self.out_projs = []

    def build(self, input_shape):
        if self.n_clusters > 0:
            self.cluster_weight = self.add_weight(
                shape=(self.n_clusters, self.d_embed), initializer="zeros", trainable=True, name="cluster_weight"
            )
            self.cluster_bias = self.add_weight(
                shape=(self.n_clusters,), initializer="zeros", trainable=True, name="cluster_bias"
            )

        if self.div_val == 1:
            for i in range(len(self.cutoffs)):
                if self.d_proj != self.d_embed:
                    weight = self.add_weight(
                        shape=(self.d_embed, self.d_proj),
                        initializer="zeros",
                        trainable=True,
                        name=f"out_projs_._{i}",
                    )
                    self.out_projs.append(weight)
                else:
                    self.out_projs.append(None)
                weight = self.add_weight(
                    shape=(self.vocab_size, self.d_embed),
                    initializer="zeros",
                    trainable=True,
                    name=f"out_layers_._{i}_._weight",
                )
                bias = self.add_weight(
                    shape=(self.vocab_size,),
                    initializer="zeros",
                    trainable=True,
                    name=f"out_layers_._{i}_._bias",
                )
                self.out_layers.append((weight, bias))
        else:
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                d_emb_i = self.d_embed // (self.div_val**i)

                weight = self.add_weight(
                    shape=(d_emb_i, self.d_proj), initializer="zeros", trainable=True, name=f"out_projs_._{i}"
                )
                self.out_projs.append(weight)
                weight = self.add_weight(
                    shape=(r_idx - l_idx, d_emb_i),
                    initializer="zeros",
                    trainable=True,
                    name=f"out_layers_._{i}_._weight",
                )
                bias = self.add_weight(
                    shape=(r_idx - l_idx,),
                    initializer="zeros",
                    trainable=True,
                    name=f"out_layers_._{i}_._bias",
                )
                self.out_layers.append((weight, bias))
        super().build(input_shape)

    @staticmethod
    def _logit(x, W, b, proj=None):
        y = x
        if proj is not None:
            y = tf.einsum("ibd,ed->ibe", y, proj)
        return tf.einsum("ibd,nd->ibn", y, W) + b

    @staticmethod
    def _gather_logprob(logprob, target):
        lp_size = shape_list(logprob)
        r = tf.range(lp_size[0], dtype=target.dtype)
        idx = tf.stack([r, target], 1)
        return tf.gather_nd(logprob, idx)

    def call(self, hidden, target, return_mean=True, training=False):
        head_logprob = 0
        if self.n_clusters == 0:
            output = self._logit(hidden, self.out_layers[0][0], self.out_layers[0][1], self.out_projs[0])
            if target is not None:
                loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=target, logits=output)
            out = tf.nn.log_softmax(output, axis=-1)
        else:
            hidden_sizes = shape_list(hidden)
            out = []
            loss = tf.zeros(hidden_sizes[:2])
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                if target is not None:
                    mask = (target >= l_idx) & (target < r_idx)
                    mask_idx = tf.where(mask)
                    cur_target = tf.boolean_mask(target, mask) - l_idx

                if self.div_val == 1:
                    cur_W = self.out_layers[0][0][l_idx:r_idx]
                    cur_b = self.out_layers[0][1][l_idx:r_idx]
                else:
                    cur_W = self.out_layers[i][0]
                    cur_b = self.out_layers[i][1]

                if i == 0:
                    cur_W = tf.concat([cur_W, self.cluster_weight], 0)
                    cur_b = tf.concat([cur_b, self.cluster_bias], 0)

                    head_logit = self._logit(hidden, cur_W, cur_b, self.out_projs[0])
                    head_logprob = tf.nn.log_softmax(head_logit)
                    out.append(head_logprob[..., : self.cutoffs[0]])
                    if target is not None:
                        cur_head_logprob = tf.boolean_mask(head_logprob, mask)
                        cur_logprob = self._gather_logprob(cur_head_logprob, cur_target)
                else:
                    tail_logit = self._logit(hidden, cur_W, cur_b, self.out_projs[i])
                    tail_logprob = tf.nn.log_softmax(tail_logit)
                    cluster_prob_idx = self.cutoffs[0] + i - 1  # No probability for the head cluster
                    logprob_i = head_logprob[..., cluster_prob_idx, None] + tail_logprob
                    out.append(logprob_i)
                    if target is not None:
                        cur_head_logprob = tf.boolean_mask(head_logprob, mask)
                        cur_tail_logprob = tf.boolean_mask(tail_logprob, mask)
                        cur_logprob = self._gather_logprob(cur_tail_logprob, cur_target)
                        cur_logprob += cur_head_logprob[:, self.cutoff_ends[1] + i - 1]
                if target is not None:
                    loss += tf.scatter_nd(mask_idx, -cur_logprob, shape_list(loss))
            out = tf.concat(out, axis=-1)

        if target is not None:
            if return_mean:
                loss = tf.reduce_mean(loss)
            # Add the training-time loss value to the layer using `self.add_loss()`.
            self.add_loss(loss)

            # Log the loss as a metric (we could log arbitrary metrics,
            # including different metrics for training and inference.
            self.add_metric(loss, name=self.name, aggregation="mean" if return_mean else "")

        return out
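# --- Shape sketch (hypothetical sizes, illustrative only) ---
#   layer = TFAdaptiveSoftmaxMask(vocab_size=1000, d_embed=32, d_proj=32, cutoffs=[100, 500], div_val=2)
#   hidden = tf.random.uniform((5, 2, 32))                           # (seq_len, batch, d_proj)
#   target = tf.random.uniform((5, 2), maxval=1000, dtype=tf.int32)
#   out = layer(hidden, target)                                      # (5, 2, 1000) log-probabilities:
#                                                                    # 100 shortlist + 400 + 500 tail entries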
| 708
|
"""simple docstring"""
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def betas_for_alpha_bar(
    num_diffusion_timesteps,
    max_beta=0.999,
    alpha_transform_type="cosine",
):
    """Creates a beta schedule that discretizes the given alpha_t_bar function."""
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
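# --- Numeric sanity note (illustrative) ---
# With the cosine transform, alpha_bar(t) = cos((t + 0.008)/1.008 * pi/2) ** 2 decays
# from ~1 toward 0, so the resulting betas increase monotonically and are capped at
# max_beta, e.g.:
#   betas = betas_for_alpha_bar(10)          # 10 betas, each in (0, 0.999]
#   bool((betas[1:] >= betas[:-1]).all())    # True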
class HeunDiscreteScheduler(SchedulerMixin, ConfigMixin):
    _compatibles = [e.name for e in KarrasDiffusionSchedulers]
    order = 2

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        beta_start: float = 0.00085,
        beta_end: float = 0.012,
        beta_schedule: str = "linear",
        trained_betas: Optional[Union[np.ndarray, List[float]]] = None,
        prediction_type: str = "epsilon",
        use_karras_sigmas: Optional[bool] = False,
        clip_sample: Optional[bool] = False,
        clip_sample_range: float = 1.0,
        timestep_spacing: str = "linspace",
        steps_offset: int = 0,
    ):
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas, dtype=torch.float32)
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps, alpha_transform_type="cosine")
        elif beta_schedule == "exp":
            self.betas = betas_for_alpha_bar(num_train_timesteps, alpha_transform_type="exp")
        else:
            raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}")

        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)

        # set all values
        self.set_timesteps(num_train_timesteps, None, num_train_timesteps)
        self.use_karras_sigmas = use_karras_sigmas
    def index_for_timestep(self, timestep, schedule_timesteps=None):
        if schedule_timesteps is None:
            schedule_timesteps = self.timesteps

        indices = (schedule_timesteps == timestep).nonzero()

        # The sigma index that is taken for the **very** first `step`
        # is always the second index (or the last index if there is only 1)
        # This way we can ensure we don't accidentally skip a sigma in
        # case we start in the middle of the denoising schedule (e.g. for image-to-image)
        if len(self._index_counter) == 0:
            pos = 1 if len(indices) > 1 else 0
        else:
            timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
            pos = self._index_counter[timestep_int]

        return indices[pos].item()
    @property
    def init_noise_sigma(self):
        # standard deviation of the initial noise distribution
        if self.config.timestep_spacing in ["linspace", "trailing"]:
            return self.sigmas.max()

        return (self.sigmas.max() ** 2 + 1) ** 0.5

    def scale_model_input(
        self,
        sample: torch.FloatTensor,
        timestep: Union[float, torch.FloatTensor],
    ) -> torch.FloatTensor:
        step_index = self.index_for_timestep(timestep)

        sigma = self.sigmas[step_index]
        sample = sample / ((sigma**2 + 1) ** 0.5)
        return sample
    def set_timesteps(
        self,
        num_inference_steps: int,
        device: Union[str, torch.device] = None,
        num_train_timesteps: Optional[int] = None,
    ):
        self.num_inference_steps = num_inference_steps
        num_train_timesteps = num_train_timesteps or self.config.num_train_timesteps

        # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
        if self.config.timestep_spacing == "linspace":
            timesteps = np.linspace(0, num_train_timesteps - 1, num_inference_steps, dtype=float)[::-1].copy()
        elif self.config.timestep_spacing == "leading":
            step_ratio = num_train_timesteps // self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(float)
            timesteps += self.config.steps_offset
        elif self.config.timestep_spacing == "trailing":
            step_ratio = num_train_timesteps / self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(num_train_timesteps, 0, -step_ratio)).round().copy().astype(float)
            timesteps -= 1
        else:
            raise ValueError(
                f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'."
            )

        sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5)
        log_sigmas = np.log(sigmas)
        sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas)

        if self.config.use_karras_sigmas:
            sigmas = self._convert_to_karras(in_sigmas=sigmas, num_inference_steps=self.num_inference_steps)
            timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas])

        sigmas = np.concatenate([sigmas, [0.0]]).astype(np.float32)
        sigmas = torch.from_numpy(sigmas).to(device=device)
        self.sigmas = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2), sigmas[-1:]])

        timesteps = torch.from_numpy(timesteps)
        timesteps = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2)])

        if str(device).startswith("mps"):
            # mps does not support float64
            self.timesteps = timesteps.to(device, dtype=torch.float32)
        else:
            self.timesteps = timesteps.to(device=device)

        # empty dt and derivative
        self.prev_derivative = None
        self.dt = None

        # for exp beta schedules, such as the one for `pipeline_shap_e.py`
        # we need an index counter
        self._index_counter = defaultdict(int)
    def _sigma_to_t(self, sigma, log_sigmas):
        # get log sigma
        log_sigma = np.log(sigma)

        # get distribution
        dists = log_sigma - log_sigmas[:, np.newaxis]

        # get sigmas range
        low_idx = np.cumsum((dists >= 0), axis=0).argmax(axis=0).clip(max=log_sigmas.shape[0] - 2)
        high_idx = low_idx + 1

        low = log_sigmas[low_idx]
        high = log_sigmas[high_idx]

        # interpolate sigmas
        w = (low - log_sigma) / (low - high)
        w = np.clip(w, 0, 1)

        # transform interpolation to time range
        t = (1 - w) * low_idx + w * high_idx
        t = t.reshape(sigma.shape)
        return t
    def _convert_to_karras(self, in_sigmas: torch.FloatTensor, num_inference_steps) -> torch.FloatTensor:
        """Constructs the noise schedule of Karras et al. (2022)."""

        sigma_min: float = in_sigmas[-1].item()
        sigma_max: float = in_sigmas[0].item()

        rho = 7.0  # 7.0 is the value used in the paper
        ramp = np.linspace(0, 1, num_inference_steps)
        min_inv_rho = sigma_min ** (1 / rho)
        max_inv_rho = sigma_max ** (1 / rho)
        sigmas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
        return sigmas

    @property
    def state_in_first_order(self):
        return self.dt is None
    def step(
        self,
        model_output: Union[torch.FloatTensor, np.ndarray],
        timestep: Union[float, torch.FloatTensor],
        sample: Union[torch.FloatTensor, np.ndarray],
        return_dict: bool = True,
    ) -> Union[SchedulerOutput, Tuple]:
        step_index = self.index_for_timestep(timestep)

        # advance index counter by 1
        timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
        self._index_counter[timestep_int] += 1

        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
            sigma_next = self.sigmas[step_index + 1]
        else:
            # 2nd order / Heun's method
            sigma = self.sigmas[step_index - 1]
            sigma_next = self.sigmas[step_index]

        # currently only gamma=0 is supported. This usually works best anyways.
        # We can support gamma in the future but then need to scale the timestep before
        # passing it to the model which requires a change in API
        gamma = 0
        sigma_hat = sigma * (gamma + 1)  # Note: sigma_hat == sigma for now

        # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
        if self.config.prediction_type == "epsilon":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_next
            pred_original_sample = sample - sigma_input * model_output
        elif self.config.prediction_type == "v_prediction":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_next
            pred_original_sample = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
                sample / (sigma_input**2 + 1)
            )
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`"
            )

        if self.config.clip_sample:
            pred_original_sample = pred_original_sample.clamp(
                -self.config.clip_sample_range, self.config.clip_sample_range
            )

        if self.state_in_first_order:
            # 2. Convert to an ODE derivative for 1st order
            derivative = (sample - pred_original_sample) / sigma_hat
            # 3. delta timestep
            dt = sigma_next - sigma_hat

            # store for 2nd order step
            self.prev_derivative = derivative
            self.dt = dt
            self.sample = sample
        else:
            # 2. 2nd order / Heun's method
            derivative = (sample - pred_original_sample) / sigma_next
            derivative = (self.prev_derivative + derivative) / 2

            # 3. take prev timestep & sample
            dt = self.dt
            sample = self.sample

            # free dt and derivative
            # Note, this puts the scheduler in "first order mode"
            self.prev_derivative = None
            self.dt = None
            self.sample = None

        prev_sample = sample + derivative * dt

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)
    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.FloatTensor,
    ) -> torch.FloatTensor:
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype)
        if original_samples.device.type == "mps" and torch.is_floating_point(timesteps):
            # mps does not support float64
            schedule_timesteps = self.timesteps.to(original_samples.device, dtype=torch.float32)
            timesteps = timesteps.to(original_samples.device, dtype=torch.float32)
        else:
            schedule_timesteps = self.timesteps.to(original_samples.device)
            timesteps = timesteps.to(original_samples.device)

        step_indices = [self.index_for_timestep(t, schedule_timesteps) for t in timesteps]

        sigma = sigmas[step_indices].flatten()
        while len(sigma.shape) < len(original_samples.shape):
            sigma = sigma.unsqueeze(-1)

        noisy_samples = original_samples + noise * sigma
        return noisy_samples
def __len__( self ):
"""simple docstring"""
return self.config.num_train_timesteps
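# --- Usage sketch (illustrative) ---
# Heun's method evaluates the model twice per output step, which is why
# set_timesteps() interleaves duplicated sigmas/timesteps:
#   scheduler = HeunDiscreteScheduler()
#   scheduler.set_timesteps(num_inference_steps=25, device="cpu")
#   len(scheduler.timesteps)  # 49 == 1 + 2 * 24: all but the first step are doubled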
| 302
| 0
|
def solution(n: int = 1000) -> int:
    """Return the sum of all multiples of 3 or 5 below n."""
    a = 3
    result = 0
    while a < n:
        # a multiple of 15 is already a multiple of 3, so a single test suffices
        if a % 3 == 0 or a % 5 == 0:
            result += a
        a += 1
    return result
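# A closed-form cross-check for solution() via inclusion-exclusion: the sum of the
# multiples of k below n is k * m * (m + 1) // 2 with m = (n - 1) // k. The helper
# below is an added illustration, not part of the original file.
def solution_closed_form(n: int = 1000) -> int:
    def multiples_sum(k: int) -> int:
        m = (n - 1) // k
        return k * m * (m + 1) // 2

    return multiples_sum(3) + multiples_sum(5) - multiples_sum(15)


# solution() == solution_closed_form() == 233168 for the default n = 1000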
if __name__ == "__main__":
print(f'{solution() = }')
| 16
|
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')

    parser.add_argument(
        '--txt2img_unclip',
        default='kakaobrain/karlo-v1-alpha',
        type=str,
        required=False,
        help='The pretrained txt2img unclip.',
    )

    args = parser.parse_args()

    txt2img = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)

    feature_extractor = CLIPImageProcessor()
    image_encoder = CLIPVisionModelWithProjection.from_pretrained('openai/clip-vit-large-patch14')

    img2img = UnCLIPImageVariationPipeline(
        decoder=txt2img.decoder,
        text_encoder=txt2img.text_encoder,
        tokenizer=txt2img.tokenizer,
        text_proj=txt2img.text_proj,
        feature_extractor=feature_extractor,
        image_encoder=image_encoder,
        super_res_first=txt2img.super_res_first,
        super_res_last=txt2img.super_res_last,
        decoder_scheduler=txt2img.decoder_scheduler,
        super_res_scheduler=txt2img.super_res_scheduler,
    )

    img2img.save_pretrained(args.dump_path)
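# --- Example invocation (script filename and paths are placeholders) ---
#   python convert_unclip_txt2img_to_image_variation.py \
#       --txt2img_unclip kakaobrain/karlo-v1-alpha \
#       --dump_path ./karlo-image-variation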
| 25
| 0
|
"""simple docstring"""
import copy
import tempfile
import unittest
from transformers import MaMaaaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from transformers.utils import cached_property
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaMaaaForConditionalGeneration, MaMaaaModel, MaMaaaTokenizer
from transformers.models.mam_aaa.modeling_mam_aaa import MaMaaaDecoder, MaMaaaEncoder
def prepare_mam_aaa_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = input_ids.ne(config.pad_token_id)
    if decoder_attention_mask is None:
        decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id)
    if head_mask is None:
        head_mask = torch.ones(config.encoder_layers, config.encoder_attention_heads, device=torch_device)
    if decoder_head_mask is None:
        decoder_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=torch_device)
    if cross_attn_head_mask is None:
        cross_attn_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=torch_device)
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
class MaMaaaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="relu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
def A_ ( self : Dict ):
'''simple docstring'''
__UpperCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCAmelCase : Union[str, Any] = self.eos_token_id # Eos Token
__UpperCAmelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
# we need to clamp the input ids here to avoid having pad token in between
# this is because for M2M100 the position_ids are prepared such that
# all pad tokens have pos id = 2 and rest are between 2..seq_length
# and the seq_length here is seq_length - num_pad_tokens
# but when using past, there is no way of knowing if the past input ids had
# pad tokens in them, which results in incorrect seq_length and which in turn results in
# position_ids being off by num_pad_tokens in past input
__UpperCAmelCase : Dict = input_ids.clamp(self.pad_token_id + 1 )
__UpperCAmelCase : Optional[Any] = decoder_input_ids.clamp(self.pad_token_id + 1 )
__UpperCAmelCase : List[Any] = self.get_config()
__UpperCAmelCase : Tuple = prepare_mam_aaa_inputs_dict(__lowercase , __lowercase , __lowercase )
return config, inputs_dict
def A_ ( self : Any ):
'''simple docstring'''
return MaMaaaConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , encoder_layerdrop=self.encoder_layerdrop , decoder_layerdrop=self.decoder_layerdrop , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , )
def A_ ( self : Dict ):
'''simple docstring'''
__UpperCAmelCase , __UpperCAmelCase : Optional[int] = self.prepare_config_and_inputs()
return config, inputs_dict
def A_ ( self : str , __lowercase : Optional[int] , __lowercase : List[Any] ):
'''simple docstring'''
__UpperCAmelCase : List[Any] = MaMaaaModel(config=__lowercase ).get_decoder().to(__lowercase ).eval()
__UpperCAmelCase : Optional[Any] = inputs_dict['''input_ids''']
__UpperCAmelCase : Dict = inputs_dict['''attention_mask''']
__UpperCAmelCase : List[Any] = inputs_dict['''head_mask''']
# first forward pass
__UpperCAmelCase : Dict = model(__lowercase , attention_mask=__lowercase , head_mask=__lowercase , use_cache=__lowercase )
__UpperCAmelCase , __UpperCAmelCase : Optional[int] = outputs.to_tuple()
# create hypothetical multiple next token and extent to next_input_ids
__UpperCAmelCase : Tuple = ids_tensor((self.batch_size, 3) , config.vocab_size )
__UpperCAmelCase : Any = ids_tensor((self.batch_size, 3) , 2 )
# append to next input_ids and
__UpperCAmelCase : int = torch.cat([input_ids, next_tokens] , dim=-1 )
__UpperCAmelCase : Optional[Any] = torch.cat([attention_mask, next_attn_mask] , dim=-1 )
__UpperCAmelCase : Dict = model(__lowercase , attention_mask=__lowercase )['''last_hidden_state''']
__UpperCAmelCase : List[Any] = model(__lowercase , attention_mask=__lowercase , past_key_values=__lowercase )[
'''last_hidden_state'''
]
# select random slice
__UpperCAmelCase : Any = ids_tensor((1,) , output_from_past.shape[-1] ).item()
__UpperCAmelCase : Tuple = output_from_no_past[:, -3:, random_slice_idx].detach()
__UpperCAmelCase : List[str] = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__lowercase , __lowercase , atol=1e-2 ) )
def A_ ( self : int , __lowercase : Dict , __lowercase : Optional[Any] ):
'''simple docstring'''
__UpperCAmelCase : Tuple = MaMaaaModel(config=__lowercase ).to(__lowercase ).eval()
__UpperCAmelCase : Optional[int] = model(**__lowercase )
__UpperCAmelCase : Optional[int] = outputs.encoder_last_hidden_state
__UpperCAmelCase : Tuple = outputs.last_hidden_state
with tempfile.TemporaryDirectory() as tmpdirname:
__UpperCAmelCase : Union[str, Any] = model.get_encoder()
encoder.save_pretrained(__lowercase )
__UpperCAmelCase : Optional[Any] = MaMaaaEncoder.from_pretrained(__lowercase ).to(__lowercase )
__UpperCAmelCase : List[str] = encoder(inputs_dict['''input_ids'''] , attention_mask=inputs_dict['''attention_mask'''] )[
0
]
self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1e-3 )
with tempfile.TemporaryDirectory() as tmpdirname:
__UpperCAmelCase : Optional[int] = model.get_decoder()
decoder.save_pretrained(__lowercase )
__UpperCAmelCase : Any = MaMaaaDecoder.from_pretrained(__lowercase ).to(__lowercase )
__UpperCAmelCase : str = decoder(
input_ids=inputs_dict['''decoder_input_ids'''] , attention_mask=inputs_dict['''decoder_attention_mask'''] , encoder_hidden_states=__lowercase , encoder_attention_mask=inputs_dict['''attention_mask'''] , )[0]
self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1e-3 )
@require_torch
class MaMaaaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MaMaaaModel,
            MaMaaaForConditionalGeneration,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (MaMaaaForConditionalGeneration,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": MaMaaaForConditionalGeneration,
            "feature-extraction": MaMaaaModel,
            "summarization": MaMaaaForConditionalGeneration,
            "text2text-generation": MaMaaaForConditionalGeneration,
            "translation": MaMaaaForConditionalGeneration,
        }
        if is_torch_available()
        else {}
    )
    is_encoder_decoder = True
    fx_compatible = True
    test_pruning = False
    test_missing_keys = False
def A_ ( self : List[str] , __lowercase : List[str] , __lowercase : Any , __lowercase : List[str] , __lowercase : str , __lowercase : List[Any] ):
'''simple docstring'''
if pipeline_test_casse_name == "TranslationPipelineTests":
# Get `ValueError: Translation requires a `src_lang` and a `tgt_lang` for this model`.
# `M2M100Config` was never used in pipeline tests: cannot create a simple tokenizer.
return True
return False
def A_ ( self : Any ):
'''simple docstring'''
__UpperCAmelCase : str = MaMaaaModelTester(self )
__UpperCAmelCase : List[str] = ConfigTester(self , config_class=__lowercase )
def A_ ( self : Union[str, Any] ):
'''simple docstring'''
self.config_tester.run_common_tests()
def A_ ( self : Optional[Any] ):
'''simple docstring'''
__UpperCAmelCase , __UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
__UpperCAmelCase : int = model_class(__lowercase )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__lowercase )
__UpperCAmelCase , __UpperCAmelCase : Optional[int] = model_class.from_pretrained(__lowercase , output_loading_info=__lowercase )
self.assertEqual(info['''missing_keys'''] , [] )
def A_ ( self : int ):
'''simple docstring'''
__UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(*__lowercase )
def A_ ( self : str ):
'''simple docstring'''
__UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_encoder_decoder_model_standalone(*__lowercase )
def A_ ( self : Union[str, Any] ):
'''simple docstring'''
__UpperCAmelCase , __UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in (MaMaaaModel, MaMaaaForConditionalGeneration):
__UpperCAmelCase : Tuple = model_class(__lowercase )
model.to(__lowercase )
model.eval()
__UpperCAmelCase : str = copy.deepcopy(self._prepare_for_class(__lowercase , __lowercase ) )
if not self.is_encoder_decoder:
__UpperCAmelCase : str = inputs['''input_ids''']
del inputs["input_ids"]
else:
__UpperCAmelCase : Union[str, Any] = inputs['''input_ids''']
__UpperCAmelCase : Tuple = inputs.get('''decoder_input_ids''' , __lowercase )
del inputs["input_ids"]
inputs.pop('''decoder_input_ids''' , __lowercase )
__UpperCAmelCase : Union[str, Any] = model.get_input_embeddings()
if not self.is_encoder_decoder:
__UpperCAmelCase : int = wte(__lowercase )
else:
__UpperCAmelCase : Union[str, Any] = wte(__lowercase )
__UpperCAmelCase : Optional[Any] = wte(__lowercase )
with torch.no_grad():
model(**__lowercase )[0]
def A_ ( self : Optional[Any] ):
'''simple docstring'''
__UpperCAmelCase , __UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
__UpperCAmelCase : int = input_dict['''input_ids''']
__UpperCAmelCase : List[Any] = input_ids.ne(1 ).to(__lowercase )
__UpperCAmelCase : Optional[Any] = MaMaaaForConditionalGeneration(__lowercase ).eval().to(__lowercase )
if torch_device == "cuda":
model.half()
model.generate(__lowercase , attention_mask=__lowercase )
model.generate(num_beams=4 , do_sample=__lowercase , early_stopping=__lowercase , num_return_sequences=3 )
def lowerCamelCase_ ( UpperCAmelCase_ ) ->Optional[Any]:
"""simple docstring"""
return torch.tensor(UpperCAmelCase_ , dtype=torch.long , device=UpperCAmelCase_ )
lowercase__ :Dict = 1E-4
@require_torch
@require_sentencepiece
@require_tokenizers
@slow
class MaMaaaModelIntegrationTests(unittest.TestCase):
    @cached_property
    def default_tokenizer(self):
        return MaMaaaTokenizer.from_pretrained('''facebook/m2m100_418M''')
def A_ ( self : List[Any] ):
'''simple docstring'''
__UpperCAmelCase : Dict = MaMaaaModel.from_pretrained('''facebook/m2m100_418M''' ).to(__lowercase )
__UpperCAmelCase : Dict = _long_tensor([[128_028, 98, 12, 30_527, 2_732, 159, 7_755, 61_904, 39_144, 38, 2]] )
__UpperCAmelCase : Optional[Any] = _long_tensor([[2, 128_028, 98, 12, 30_527, 2_732, 159, 7_755, 61_904, 39_144, 38]] )
__UpperCAmelCase : str = prepare_mam_aaa_inputs_dict(model.config , __lowercase , __lowercase )
with torch.no_grad():
__UpperCAmelCase : Optional[int] = model(**__lowercase )[0]
__UpperCAmelCase : str = torch.Size((1, 11, 1_024) )
self.assertEqual(output.shape , __lowercase )
# change to expected output here
__UpperCAmelCase : str = torch.tensor(
[[-0.7_7_8_0, -0.1_6_7_6, 0.1_0_3_8], [-6.7_5_5_6, -1.3_9_9_2, 0.0_5_6_7], [-7.5_3_8_3, -0.5_9_2_0, -0.2_7_7_9]] , device=__lowercase )
self.assertTrue(torch.allclose(output[:, :3, :3] , __lowercase , atol=__lowercase ) )
def A_ ( self : Optional[int] ):
'''simple docstring'''
__UpperCAmelCase : int = MaMaaaForConditionalGeneration.from_pretrained('''facebook/m2m100_418M''' ).to(__lowercase )
# change to intended input
__UpperCAmelCase : Any = _long_tensor([[128_028, 98, 12, 30_527, 2_732, 159, 7_755, 61_904, 39_144, 38, 2]] )
__UpperCAmelCase : Union[str, Any] = _long_tensor([[2, 128_028, 98, 12, 30_527, 2_732, 159, 7_755, 61_904, 39_144, 38]] )
__UpperCAmelCase : Optional[Any] = prepare_mam_aaa_inputs_dict(model.config , __lowercase , __lowercase )
with torch.no_grad():
__UpperCAmelCase : List[Any] = model(**__lowercase )[0]
__UpperCAmelCase : Optional[int] = torch.Size((1, 11, model.config.vocab_size) )
self.assertEqual(output.shape , __lowercase )
# change to expected output here
__UpperCAmelCase : Union[str, Any] = torch.tensor(
[[-1.0_4_4_8, -1.0_4_1_1, 3.7_9_9_2], [-3.2_1_9_1, -3.2_3_8_6, -1.3_4_5_1], [-3.6_2_1_0, -3.5_9_9_3, 0.4_9_2_5]] , device=__lowercase )
self.assertTrue(torch.allclose(output[:, :3, :3] , __lowercase , atol=__lowercase ) )
def A_ ( self : int ):
'''simple docstring'''
__UpperCAmelCase : List[str] = MaMaaaForConditionalGeneration.from_pretrained('''facebook/m2m100_418M''' ).to(__lowercase )
__UpperCAmelCase : Tuple = MaMaaaTokenizer.from_pretrained('''facebook/m2m100_418M''' , src_lang='''fr''' , tgt_lang='''en''' )
__UpperCAmelCase : Optional[Any] = [
'''L\'affaire NSA souligne l\'absence totale de débat sur le renseignement''',
'''Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.''',
'''Lorsque François Hollande téléphone à Barack Obama ou quand le ministre des affaires étrangères Laurent'''
''' Fabius convoque l\'ambassadeur des Etats-Unis, ils réagissent à une vraie découverte, qui est celle de'''
''' l\'ampleur de la surveillance américaine sur l\'ensemble des communications en France.''',
]
# The below article tests that we don't add any hypotheses outside of the top n_beams
__UpperCAmelCase : Tuple = tokenizer(__lowercase , padding=__lowercase , return_tensors='''pt''' )
__UpperCAmelCase : List[Any] = model.generate(
input_ids=dct['''input_ids'''].to(__lowercase ) , attention_mask=dct['''attention_mask'''].to(__lowercase ) , num_beams=5 , forced_bos_token_id=tokenizer.get_lang_id('''en''' ) , )
__UpperCAmelCase : Any = [
'''The NSA case highlights the total absence of intelligence debate''',
'''I think there are two levels of response from the French government.''',
'''When François Hollande calls Barack Obama or when Foreign Minister Laurent Fabius calls the U.S.'''
''' Ambassador, they respond to a real discovery, which is that of the scale of U.S. surveillance on all'''
''' communications in France.''',
]
__UpperCAmelCase : Union[str, Any] = tokenizer.batch_decode(
hypotheses_batch.tolist() , clean_up_tokenization_spaces=__lowercase , skip_special_tokens=__lowercase )
assert generated == expected_en
| 374
|
"""simple docstring"""
import argparse
import torch
from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = LxmertConfig.from_json_file(config_file)
    print(f'''Building PyTorch model from configuration: {config}''')
    model = LxmertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_lxmert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f'''Save PyTorch model to {pytorch_dump_path}''')
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
lowercase__ :Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
lowercase__ :Union[str, Any] = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
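# --- Example invocation (paths are placeholders) ---
#   python convert_lxmert_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./model.ckpt \
#       --config_file ./config.json \
#       --pytorch_dump_path ./pytorch_model.bin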
| 374
| 1
|
'''simple docstring'''
from manim import *
class _a (_lowerCamelCase):
"""simple docstring"""
def UpperCamelCase ( self ) -> Any:
_SCREAMING_SNAKE_CASE = Rectangle(height=0.5 , width=0.5 )
_SCREAMING_SNAKE_CASE = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
_SCREAMING_SNAKE_CASE = [mem.copy() for i in range(6 )]
_SCREAMING_SNAKE_CASE = [mem.copy() for i in range(6 )]
_SCREAMING_SNAKE_CASE = VGroup(*A__ ).arrange(A__ , buff=0 )
_SCREAMING_SNAKE_CASE = VGroup(*A__ ).arrange(A__ , buff=0 )
_SCREAMING_SNAKE_CASE = VGroup(A__ , A__ ).arrange(A__ , buff=0 )
_SCREAMING_SNAKE_CASE = Text("""CPU""" , font_size=24 )
_SCREAMING_SNAKE_CASE = Group(A__ , A__ ).arrange(A__ , buff=0.5 , aligned_edge=A__ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(A__ )
_SCREAMING_SNAKE_CASE = [mem.copy() for i in range(4 )]
_SCREAMING_SNAKE_CASE = VGroup(*A__ ).arrange(A__ , buff=0 )
_SCREAMING_SNAKE_CASE = Text("""GPU""" , font_size=24 )
_SCREAMING_SNAKE_CASE = Group(A__ , A__ ).arrange(A__ , buff=0.5 , aligned_edge=A__ )
gpu.move_to([-1, -1, 0] )
self.add(A__ )
_SCREAMING_SNAKE_CASE = [mem.copy() for i in range(6 )]
_SCREAMING_SNAKE_CASE = VGroup(*A__ ).arrange(A__ , buff=0 )
_SCREAMING_SNAKE_CASE = Text("""Model""" , font_size=24 )
_SCREAMING_SNAKE_CASE = Group(A__ , A__ ).arrange(A__ , buff=0.5 , aligned_edge=A__ )
model.move_to([3, -1.0, 0] )
self.add(A__ )
_SCREAMING_SNAKE_CASE = []
for i, rect in enumerate(A__ ):
rect.set_stroke(A__ )
# target = fill.copy().set_fill(YELLOW, opacity=0.7)
# target.move_to(rect)
# self.add(target)
_SCREAMING_SNAKE_CASE = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(A__ , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=A__ )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(cpu_targs[0] , direction=A__ , buff=0.0 )
else:
cpu_target.next_to(cpu_targs[i - 1] , direction=A__ , buff=0.0 )
self.add(A__ )
cpu_targs.append(A__ )
_SCREAMING_SNAKE_CASE = [mem.copy() for i in range(6 )]
_SCREAMING_SNAKE_CASE = VGroup(*A__ ).arrange(A__ , buff=0 )
_SCREAMING_SNAKE_CASE = Text("""Loaded Checkpoint""" , font_size=24 )
_SCREAMING_SNAKE_CASE = Group(A__ , A__ ).arrange(A__ , aligned_edge=A__ , buff=0.4 )
checkpoint.move_to([3, 0.5, 0] )
_SCREAMING_SNAKE_CASE = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
_SCREAMING_SNAKE_CASE = MarkupText(
F"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model" , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(A__ , A__ )
_SCREAMING_SNAKE_CASE = MarkupText(
F"<span fgcolor='{BLUE}'>●</span> Checkpoint" , font_size=18 , )
blue_text.next_to(A__ , DOWN * 2.4 , aligned_edge=key_text.get_left() )
_SCREAMING_SNAKE_CASE = MarkupText(
F"Next, a <i><span fgcolor=\"{BLUE}\">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor=\"{BLUE}\">single shard</span>." , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(A__ ) , Write(A__ ) )
self.play(Write(A__ , run_time=1 ) , Create(A__ , run_time=1 ) )
_SCREAMING_SNAKE_CASE = []
_SCREAMING_SNAKE_CASE = []
for i, rect in enumerate(A__ ):
_SCREAMING_SNAKE_CASE = fill.copy().set_fill(A__ , opacity=0.7 )
target.move_to(A__ )
first_animations.append(GrowFromCenter(A__ , run_time=1 ) )
_SCREAMING_SNAKE_CASE = target.copy()
cpu_target.generate_target()
if i < 5:
cpu_target.target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.target.move_to(cpu_right_col_base[i - 5] )
second_animations.append(MoveToTarget(A__ , run_time=1.5 ) )
self.play(*A__ )
self.play(*A__ )
self.wait()
| 591
|
'''simple docstring'''
import numpy as np
import pandas as pd
from sklearn.preprocessing import Normalizer
from sklearn.svm import SVR
from statsmodels.tsa.statespace.sarimax import SARIMAX
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> float:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = np.array([[1, item, train_mtch[i]] for i, item in enumerate(SCREAMING_SNAKE_CASE_ )] )
_SCREAMING_SNAKE_CASE = np.array(SCREAMING_SNAKE_CASE_ )
_SCREAMING_SNAKE_CASE = np.dot(np.dot(np.linalg.inv(np.dot(x.transpose() , SCREAMING_SNAKE_CASE_ ) ) , x.transpose() ) , SCREAMING_SNAKE_CASE_ )
return abs(beta[0] + test_dt[0] * beta[1] + test_mtch[0] + beta[2] )
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> float:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = (1, 2, 1)
_SCREAMING_SNAKE_CASE = (1, 1, 0, 7)
_SCREAMING_SNAKE_CASE = SARIMAX(
SCREAMING_SNAKE_CASE_ , exog=SCREAMING_SNAKE_CASE_ , order=SCREAMING_SNAKE_CASE_ , seasonal_order=SCREAMING_SNAKE_CASE_ )
_SCREAMING_SNAKE_CASE = model.fit(disp=SCREAMING_SNAKE_CASE_ , maxiter=6_00 , method="""nm""" )
_SCREAMING_SNAKE_CASE = model_fit.predict(1 , len(SCREAMING_SNAKE_CASE_ ) , exog=[test_match] )
return result[0]
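# Notes on the fixed hyperparameters above: order=(1, 2, 1) is the non-seasonal
# (p, d, q) ARIMA spec, and seasonal_order=(1, 1, 0, 7) assumes a weekly (period-7)
# seasonal pattern. Fitting uses Nelder-Mead ("nm") capped at 600 iterations; predict()
# forecasts from step 1 onward and result[0] is taken as this model's vote.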
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> float:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = SVR(kernel="""rbf""" , C=1 , gamma=0.1 , epsilon=0.1 )
regressor.fit(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
_SCREAMING_SNAKE_CASE = regressor.predict(SCREAMING_SNAKE_CASE_ )
return y_pred[0]
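# A minimal usage sketch (hypothetical data shapes): x_train holds [total_match, days]
# rows, train_user the matching targets, and x_test a single held-out row; the
# RBF-kernel SVR with C=1, gamma=0.1, epsilon=0.1 returns its one prediction as this
# model's vote.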
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> float:
"""simple docstring"""
train_user.sort()
_SCREAMING_SNAKE_CASE = np.percentile(SCREAMING_SNAKE_CASE_ , 25 )  # first quartile, q_low
_SCREAMING_SNAKE_CASE = np.percentile(SCREAMING_SNAKE_CASE_ , 75 )  # third quartile, q_high
_SCREAMING_SNAKE_CASE = q_high - q_low
_SCREAMING_SNAKE_CASE = q_low - (iqr * 0.1)
return low_lim
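# Worked example: for train_user = [2, 4, 6, 8, 10], np.percentile yields q_low = 4.0
# and q_high = 8.0, so the interquartile range is 4.0 and the returned lower safety
# limit is 4.0 - (4.0 * 0.1) = 3.6.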
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> bool:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = 0
_SCREAMING_SNAKE_CASE = 0
for i in list_vote:
if i > actual_result:
_SCREAMING_SNAKE_CASE = not_safe + 1
else:
if abs(abs(SCREAMING_SNAKE_CASE_ ) - abs(SCREAMING_SNAKE_CASE_ ) ) <= 0.1:
safe += 1
else:
not_safe += 1
return safe > not_safe
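# Worked example: with list_vote = [0.95, 1.0, 2.0] and actual_result = 1.0, the votes
# 0.95 and 1.0 fall within the 0.1 tolerance (safe = 2) while 2.0 exceeds the actual
# value (not_safe = 1), so the checker returns True.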
if __name__ == "__main__":
# data_input_df = pd.read_csv("ex_data.csv", header=None)
UpperCamelCase__ : Tuple = [[18_231, 0.0, 1], [22_621, 1.0, 2], [15_675, 0.0, 3], [23_583, 1.0, 4]]
UpperCamelCase__ : Tuple = pd.DataFrame(
data_input, columns=["total_user", "total_even", "days"]
)
UpperCamelCase__ : Dict = Normalizer().fit_transform(data_input_df.values)
# split data
UpperCamelCase__ : Tuple = normalize_df[:, 2].tolist()
UpperCamelCase__ : str = normalize_df[:, 0].tolist()
UpperCamelCase__ : Optional[int] = normalize_df[:, 1].tolist()
# for svr (input variable = total date and total match)
UpperCamelCase__ : str = normalize_df[:, [1, 2]].tolist()
UpperCamelCase__ : Optional[int] = x[: len(x) - 1]
UpperCamelCase__ : Tuple = x[len(x) - 1 :]
# for linear regression & sarimax
UpperCamelCase__ : List[Any] = total_date[: len(total_date) - 1]
UpperCamelCase__ : str = total_user[: len(total_user) - 1]
UpperCamelCase__ : Union[str, Any] = total_match[: len(total_match) - 1]
UpperCamelCase__ : Optional[Any] = total_date[len(total_date) - 1 :]
UpperCamelCase__ : List[str] = total_user[len(total_user) - 1 :]
UpperCamelCase__ : Optional[Any] = total_match[len(total_match) - 1 :]
# voting system with forecasting
UpperCamelCase__ : Union[str, Any] = [
linear_regression_prediction(
trn_date, trn_user, trn_match, tst_date, tst_match
),
sarimax_predictor(trn_user, trn_match, tst_match),
support_vector_regressor(x_train, x_test, trn_user),
]
# check the safety of today's data
UpperCamelCase__ : str = "" if data_safety_checker(res_vote, tst_user) else "not "
print("Today's data is {not_str}safe.")
| 591
| 1
|
'''simple docstring'''
import argparse
import torch
from transformers import (
EncodecConfig,
EncodecFeatureExtractor,
EncodecModel,
logging,
)
# checkpoints downloaded from:
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th
# https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE__ = logging.get_logger('transformers.models.encodec')
SCREAMING_SNAKE_CASE__ = {
'quantizer.vq.layers.*._codebook.inited': 'quantizer.layers.*.codebook.inited',
'quantizer.vq.layers.*._codebook.cluster_size': 'quantizer.layers.*.codebook.cluster_size',
'quantizer.vq.layers.*._codebook.embed': 'quantizer.layers.*.codebook.embed',
'quantizer.vq.layers.*._codebook.embed_avg': 'quantizer.layers.*.codebook.embed_avg',
}
SCREAMING_SNAKE_CASE__ = {
'encoder.model.0.conv.conv': 'encoder.layers.0.conv',
'encoder.model.1.block.1.conv.conv': 'encoder.layers.1.block.1.conv',
'encoder.model.1.block.3.conv.conv': 'encoder.layers.1.block.3.conv',
'encoder.model.1.shortcut.conv.conv': 'encoder.layers.1.shortcut.conv',
'encoder.model.3.conv.conv': 'encoder.layers.3.conv',
'encoder.model.4.block.1.conv.conv': 'encoder.layers.4.block.1.conv',
'encoder.model.4.block.3.conv.conv': 'encoder.layers.4.block.3.conv',
'encoder.model.4.shortcut.conv.conv': 'encoder.layers.4.shortcut.conv',
'encoder.model.6.conv.conv': 'encoder.layers.6.conv',
'encoder.model.7.block.1.conv.conv': 'encoder.layers.7.block.1.conv',
'encoder.model.7.block.3.conv.conv': 'encoder.layers.7.block.3.conv',
'encoder.model.7.shortcut.conv.conv': 'encoder.layers.7.shortcut.conv',
'encoder.model.9.conv.conv': 'encoder.layers.9.conv',
'encoder.model.10.block.1.conv.conv': 'encoder.layers.10.block.1.conv',
'encoder.model.10.block.3.conv.conv': 'encoder.layers.10.block.3.conv',
'encoder.model.10.shortcut.conv.conv': 'encoder.layers.10.shortcut.conv',
'encoder.model.12.conv.conv': 'encoder.layers.12.conv',
'encoder.model.13.lstm': 'encoder.layers.13.lstm',
'encoder.model.15.conv.conv': 'encoder.layers.15.conv',
}
SCREAMING_SNAKE_CASE__ = {
'encoder.model.0.conv.norm': 'encoder.layers.0.norm',
'encoder.model.1.block.1.conv.norm': 'encoder.layers.1.block.1.norm',
'encoder.model.1.block.3.conv.norm': 'encoder.layers.1.block.3.norm',
'encoder.model.1.shortcut.conv.norm': 'encoder.layers.1.shortcut.norm',
'encoder.model.3.conv.norm': 'encoder.layers.3.norm',
'encoder.model.4.block.1.conv.norm': 'encoder.layers.4.block.1.norm',
'encoder.model.4.block.3.conv.norm': 'encoder.layers.4.block.3.norm',
'encoder.model.4.shortcut.conv.norm': 'encoder.layers.4.shortcut.norm',
'encoder.model.6.conv.norm': 'encoder.layers.6.norm',
'encoder.model.7.block.1.conv.norm': 'encoder.layers.7.block.1.norm',
'encoder.model.7.block.3.conv.norm': 'encoder.layers.7.block.3.norm',
'encoder.model.7.shortcut.conv.norm': 'encoder.layers.7.shortcut.norm',
'encoder.model.9.conv.norm': 'encoder.layers.9.norm',
'encoder.model.10.block.1.conv.norm': 'encoder.layers.10.block.1.norm',
'encoder.model.10.block.3.conv.norm': 'encoder.layers.10.block.3.norm',
'encoder.model.10.shortcut.conv.norm': 'encoder.layers.10.shortcut.norm',
'encoder.model.12.conv.norm': 'encoder.layers.12.norm',
'encoder.model.15.conv.norm': 'encoder.layers.15.norm',
}
SCREAMING_SNAKE_CASE__ = {
'decoder.model.0.conv.conv': 'decoder.layers.0.conv',
'decoder.model.1.lstm': 'decoder.layers.1.lstm',
'decoder.model.3.convtr.convtr': 'decoder.layers.3.conv',
'decoder.model.4.block.1.conv.conv': 'decoder.layers.4.block.1.conv',
'decoder.model.4.block.3.conv.conv': 'decoder.layers.4.block.3.conv',
'decoder.model.4.shortcut.conv.conv': 'decoder.layers.4.shortcut.conv',
'decoder.model.6.convtr.convtr': 'decoder.layers.6.conv',
'decoder.model.7.block.1.conv.conv': 'decoder.layers.7.block.1.conv',
'decoder.model.7.block.3.conv.conv': 'decoder.layers.7.block.3.conv',
'decoder.model.7.shortcut.conv.conv': 'decoder.layers.7.shortcut.conv',
'decoder.model.9.convtr.convtr': 'decoder.layers.9.conv',
'decoder.model.10.block.1.conv.conv': 'decoder.layers.10.block.1.conv',
'decoder.model.10.block.3.conv.conv': 'decoder.layers.10.block.3.conv',
'decoder.model.10.shortcut.conv.conv': 'decoder.layers.10.shortcut.conv',
'decoder.model.12.convtr.convtr': 'decoder.layers.12.conv',
'decoder.model.13.block.1.conv.conv': 'decoder.layers.13.block.1.conv',
'decoder.model.13.block.3.conv.conv': 'decoder.layers.13.block.3.conv',
'decoder.model.13.shortcut.conv.conv': 'decoder.layers.13.shortcut.conv',
'decoder.model.15.conv.conv': 'decoder.layers.15.conv',
}
SCREAMING_SNAKE_CASE__ = {
'decoder.model.0.conv.norm': 'decoder.layers.0.norm',
'decoder.model.3.convtr.norm': 'decoder.layers.3.norm',
'decoder.model.4.block.1.conv.norm': 'decoder.layers.4.block.1.norm',
'decoder.model.4.block.3.conv.norm': 'decoder.layers.4.block.3.norm',
'decoder.model.4.shortcut.conv.norm': 'decoder.layers.4.shortcut.norm',
'decoder.model.6.convtr.norm': 'decoder.layers.6.norm',
'decoder.model.7.block.1.conv.norm': 'decoder.layers.7.block.1.norm',
'decoder.model.7.block.3.conv.norm': 'decoder.layers.7.block.3.norm',
'decoder.model.7.shortcut.conv.norm': 'decoder.layers.7.shortcut.norm',
'decoder.model.9.convtr.norm': 'decoder.layers.9.norm',
'decoder.model.10.block.1.conv.norm': 'decoder.layers.10.block.1.norm',
'decoder.model.10.block.3.conv.norm': 'decoder.layers.10.block.3.norm',
'decoder.model.10.shortcut.conv.norm': 'decoder.layers.10.shortcut.norm',
'decoder.model.12.convtr.norm': 'decoder.layers.12.norm',
'decoder.model.13.block.1.conv.norm': 'decoder.layers.13.block.1.norm',
'decoder.model.13.block.3.conv.norm': 'decoder.layers.13.block.3.norm',
'decoder.model.13.shortcut.conv.norm': 'decoder.layers.13.shortcut.norm',
'decoder.model.15.conv.norm': 'decoder.layers.15.norm',
}
SCREAMING_SNAKE_CASE__ = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_DECODER,
}
SCREAMING_SNAKE_CASE__ = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_ENCODER_48K,
**MAPPING_DECODER,
**MAPPING_DECODER_48K,
}
SCREAMING_SNAKE_CASE__ = []
SCREAMING_SNAKE_CASE__ = []
def lowercase__ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> Dict:
for attribute in key.split(""".""" ):
UpperCamelCase = getattr(__UpperCamelCase , __UpperCamelCase )
if weight_type is not None:
UpperCamelCase = getattr(__UpperCamelCase , __UpperCamelCase ).shape
else:
UpperCamelCase = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
F" {value.shape} for {full_name}" )
if weight_type == "weight":
UpperCamelCase = value
elif weight_type == "weight_g":
UpperCamelCase = value
elif weight_type == "weight_v":
UpperCamelCase = value
elif weight_type == "bias":
UpperCamelCase = value
elif weight_type == "running_mean":
UpperCamelCase = value
elif weight_type == "running_var":
UpperCamelCase = value
elif weight_type == "num_batches_tracked":
UpperCamelCase = value
elif weight_type == "weight_ih_l0":
UpperCamelCase = value
elif weight_type == "weight_hh_l0":
UpperCamelCase = value
elif weight_type == "bias_ih_l0":
UpperCamelCase = value
elif weight_type == "bias_hh_l0":
UpperCamelCase = value
elif weight_type == "weight_ih_l1":
UpperCamelCase = value
elif weight_type == "weight_hh_l1":
UpperCamelCase = value
elif weight_type == "bias_ih_l1":
UpperCamelCase = value
elif weight_type == "bias_hh_l1":
UpperCamelCase = value
else:
UpperCamelCase = value
logger.info(F"{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}." )
def lowercase__ ( __UpperCamelCase , __UpperCamelCase )-> Optional[Any]:
for key in ignore_keys:
if key.endswith(""".*""" ):
if name.startswith(key[:-1] ):
return True
elif ".*." in key:
UpperCamelCase = key.split(""".*.""" )
if prefix in name and suffix in name:
return True
elif key in name:
return True
return False
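# Example: with ignore_keys = ["decoder.*"], any name starting with "decoder." is
# skipped (a trailing ".*" acts as a prefix wildcard); a key like "encoder.*.lstm" is
# matched whenever both the "encoder" and "lstm" substrings occur in the name.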
def lowercase__ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> Tuple:
UpperCamelCase = []
if model_name in ("encodec_24khz", "encodec_32khz"):
UpperCamelCase = MAPPING_24K
elif model_name == "encodec_48khz":
UpperCamelCase = MAPPING_48K
else:
raise ValueError(F"Unsupported model: {model_name}" )
for name, value in orig_dict.items():
if should_ignore(__UpperCamelCase , __UpperCamelCase ):
logger.info(F"{name} was ignored" )
continue
UpperCamelCase = False
for key, mapped_key in MAPPING.items():
if "*" in key:
UpperCamelCase = key.split(""".*.""" )
if prefix in name and suffix in name:
UpperCamelCase = suffix
if key in name:
# HACK otherwise .embed gets initialized with .embed_avg too
if key.endswith("""embed""" ) and name.endswith("""embed_avg""" ):
continue
UpperCamelCase = True
if "*" in mapped_key:
UpperCamelCase = name.split(__UpperCamelCase )[0].split(""".""" )[-2]
UpperCamelCase = mapped_key.replace("""*""" , __UpperCamelCase )
if "weight_g" in name:
UpperCamelCase = 'weight_g'
elif "weight_v" in name:
UpperCamelCase = 'weight_v'
elif "weight_ih_l0" in name:
UpperCamelCase = 'weight_ih_l0'
elif "weight_hh_l0" in name:
UpperCamelCase = 'weight_hh_l0'
elif "bias_ih_l0" in name:
UpperCamelCase = 'bias_ih_l0'
elif "bias_hh_l0" in name:
UpperCamelCase = 'bias_hh_l0'
elif "weight_ih_l1" in name:
UpperCamelCase = 'weight_ih_l1'
elif "weight_hh_l1" in name:
UpperCamelCase = 'weight_hh_l1'
elif "bias_ih_l1" in name:
UpperCamelCase = 'bias_ih_l1'
elif "bias_hh_l1" in name:
UpperCamelCase = 'bias_hh_l1'
elif "bias" in name:
UpperCamelCase = 'bias'
elif "weight" in name:
UpperCamelCase = 'weight'
elif "running_mean" in name:
UpperCamelCase = 'running_mean'
elif "running_var" in name:
UpperCamelCase = 'running_var'
elif "num_batches_tracked" in name:
UpperCamelCase = 'num_batches_tracked'
else:
UpperCamelCase = None
set_recursively(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
continue
if not is_used:
unused_weights.append(__UpperCamelCase )
logger.warning(F"Unused weights: {unused_weights}" )
@torch.no_grad()
def lowercase__ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=None , __UpperCamelCase=None , )-> List[Any]:
if config_path is not None:
UpperCamelCase = EncodecConfig.from_pretrained(__UpperCamelCase )
else:
UpperCamelCase = EncodecConfig()
if model_name == "encodec_24khz":
pass # config is already correct
elif model_name == "encodec_32khz":
UpperCamelCase = [8, 5, 4, 4]
UpperCamelCase = [2.2]
UpperCamelCase = 64
UpperCamelCase = 32000
UpperCamelCase = 2048
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
elif model_name == "encodec_48khz":
UpperCamelCase = [8, 5, 4, 2]
UpperCamelCase = [3.0, 6.0, 12.0, 24.0]
UpperCamelCase = 48000
UpperCamelCase = 2
UpperCamelCase = False
UpperCamelCase = 'time_group_norm'
UpperCamelCase = True
UpperCamelCase = 1.0
UpperCamelCase = 0.01
else:
raise ValueError(F"Unknown model name: {model_name}" )
UpperCamelCase = EncodecModel(__UpperCamelCase )
UpperCamelCase = EncodecFeatureExtractor(
feature_size=config.audio_channels , sampling_rate=config.sampling_rate , chunk_length_s=config.chunk_length_s , overlap=config.overlap , )
feature_extractor.save_pretrained(__UpperCamelCase )
UpperCamelCase = torch.load(__UpperCamelCase )
if "best_state" in original_checkpoint:
# we might have a training state saved, in which case discard the yaml results and just retain the weights
UpperCamelCase = original_checkpoint['best_state']
recursively_load_weights(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
model.save_pretrained(__UpperCamelCase )
if repo_id:
print("""Pushing to the hub...""" )
feature_extractor.push_to_hub(__UpperCamelCase )
model.push_to_hub(__UpperCamelCase )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
parser.add_argument(
'--model',
default='encodec_24khz',
type=str,
help='The model to convert. Should be one of \'encodec_24khz\', \'encodec_32khz\', \'encodec_48khz\'.',
)
parser.add_argument('--checkpoint_path', required=True, default=None, type=str, help='Path to original checkpoint')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--pytorch_dump_folder_path', required=True, default=None, type=str, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--push_to_hub', default=None, type=str, help='Where to upload the converted model on the 🤗 hub.'
)
SCREAMING_SNAKE_CASE__ = parser.parse_args()
convert_checkpoint(
args.model,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
| 716
|
'''simple docstring'''
def lowercase__ ( __UpperCamelCase )-> str:
if not all(char in """01""" for char in bin_string ):
raise ValueError("""Non-binary value was passed to the function""" )
if not bin_string:
raise ValueError("""Empty string was passed to the function""" )
UpperCamelCase = """"""
while len(__UpperCamelCase ) % 3 != 0:
UpperCamelCase = """0""" + bin_string
UpperCamelCase = [
bin_string[index : index + 3]
for index in range(len(__UpperCamelCase ) )
if index % 3 == 0
]
for bin_group in bin_string_in_3_list:
UpperCamelCase = 0
for index, val in enumerate(__UpperCamelCase ):
oct_val += int(2 ** (2 - index) * int(__UpperCamelCase ) )
oct_string += str(__UpperCamelCase )
return oct_string
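# Verifiable examples for the converter above: "111" -> "7", and "1111" is left-padded
# to "001111" before group-by-group conversion, giving "17" (decimal 15).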
if __name__ == "__main__":
from doctest import testmod
testmod()
| 35
| 0
|
import heapq
import sys
import numpy as np
lowercase : Any = tuple[int, int]
class UpperCAmelCase_ :
'''simple docstring'''
def __init__( self ) -> int:
snake_case_ : Any = []
snake_case_ : Any = set()
def _lowerCAmelCase ( self ) -> List[Any]:
if not self.empty():
return self.elements[0][0]
else:
return float("inf" )
def _lowerCAmelCase ( self ) -> int:
return len(self.elements ) == 0
def _lowerCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
if item not in self.set:
heapq.heappush(self.elements , (priority, item) )
self.set.add(_SCREAMING_SNAKE_CASE )
else:
# update
# print("update", item)
snake_case_ : List[Any] = []
((snake_case_) , (snake_case_)) : Dict = heapq.heappop(self.elements )
while x != item:
temp.append((pri, x) )
((snake_case_) , (snake_case_)) : Optional[int] = heapq.heappop(self.elements )
temp.append((priority, item) )
for pro, xxx in temp:
heapq.heappush(self.elements , (pro, xxx) )
def _lowerCAmelCase ( self , _SCREAMING_SNAKE_CASE ) -> Any:
if item in self.set:
self.set.remove(_SCREAMING_SNAKE_CASE )
snake_case_ : Any = []
((snake_case_) , (snake_case_)) : Any = heapq.heappop(self.elements )
while x != item:
temp.append((pro, x) )
((snake_case_) , (snake_case_)) : Optional[Any] = heapq.heappop(self.elements )
for prito, yyy in temp:
heapq.heappush(self.elements , (prito, yyy) )
def _lowerCAmelCase ( self ) -> Optional[Any]:
return self.elements[0][1]
def _lowerCAmelCase ( self ) -> Any:
((snake_case_) , (snake_case_)) : Dict = heapq.heappop(self.elements )
self.set.remove(_SCREAMING_SNAKE_CASE )
return (priority, item)
def lowerCAmelCase__ ( _a : TPos , _a : TPos ):
# euclidean distance
snake_case_ : List[Any] = np.array(_a )
snake_case_ : str = np.array(_a )
return np.linalg.norm(a - b )
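# Example: consistent_heuristic((0, 0), (3, 4)) evaluates to 5.0, the straight-line
# (Euclidean) distance between the two grid positions.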
def lowerCAmelCase__ ( _a : TPos , _a : TPos ):
# integer division by time variable
return consistent_heuristic(_a , _a ) // t
def lowerCAmelCase__ ( _a : TPos , _a : TPos ):
# manhattan distance
return abs(p[0] - goal[0] ) + abs(p[1] - goal[1] )
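# Example: for p = (0, 0) and goal = (3, 4) the Manhattan heuristic is |0-3| + |0-4| = 7.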
def lowerCAmelCase__ ( _a : TPos , _a : int , _a : TPos , _a : dict[TPos, float] ):
snake_case_ : List[Any] = g_function[start] + Wa * heuristics[i](_a , _a )
return ans
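# The priority key is the weighted A* f-value f(s) = g(s) + Wa * h_i(s, goal); with the
# weight hyperparameters set to 1 below, this reduces to the classic f = g + h.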
def lowerCAmelCase__ ( _a : Tuple , _a : List[Any] , _a : List[Any] ):
snake_case_ : Union[str, Any] = np.chararray((n, n) )
for i in range(_a ):
for j in range(_a ):
snake_case_ : Tuple = "*"
for i in range(_a ):
for j in range(_a ):
if (j, (n - 1) - i) in blocks:
snake_case_ : Optional[int] = "#"
snake_case_ : int = "-"
snake_case_ : Union[str, Any] = back_pointer[goal]
while x != start:
((snake_case_) , (snake_case_)) : str = x
# print(x)
snake_case_ : str = "-"
snake_case_ : Optional[int] = back_pointer[x]
snake_case_ : List[str] = "-"
for i in range(_a ):
for j in range(_a ):
if (i, j) == (0, n - 1):
print(grid[i][j] , end=" " )
print("<-- End position" , end=" " )
else:
print(grid[i][j] , end=" " )
print()
print("^" )
print("Start position" )
print()
print("# is an obstacle" )
print("- is the path taken by algorithm" )
print("PATH TAKEN BY THE ALGORITHM IS:-" )
snake_case_ : int = back_pointer[goal]
while x != start:
print(_a , end=" " )
snake_case_ : List[str] = back_pointer[x]
print(_a )
sys.exit()
def lowerCAmelCase__ ( _a : TPos ):
if p[0] < 0 or p[0] > n - 1:
return False
if p[1] < 0 or p[1] > n - 1:
return False
return True
def lowerCAmelCase__ ( _a : Dict , _a : Tuple , _a : int , _a : Tuple , _a : Dict , _a : Any , _a : Union[str, Any] , _a : List[Any] , ):
for itera in range(_a ):
open_list[itera].remove_element(_a )
# print("s", s)
# print("j", j)
((snake_case_) , (snake_case_)) : Any = s
snake_case_ : Optional[int] = (x - 1, y)
snake_case_ : List[Any] = (x + 1, y)
snake_case_ : Union[str, Any] = (x, y + 1)
snake_case_ : Dict = (x, y - 1)
for neighbours in [left, right, up, down]:
if neighbours not in blocks:
if valid(_a ) and neighbours not in visited:
# print("neighbour", neighbours)
visited.add(_a )
snake_case_ : List[str] = -1
snake_case_ : List[Any] = float("inf" )
if valid(_a ) and g_function[neighbours] > g_function[s] + 1:
snake_case_ : List[str] = g_function[s] + 1
snake_case_ : str = s
if neighbours not in close_list_anchor:
open_list[0].put(_a , key(_a , 0 , _a , _a ) )
if neighbours not in close_list_inad:
for var in range(1 , _a ):
if key(_a , _a , _a , _a ) <= Wa * key(
_a , 0 , _a , _a ):
open_list[j].put(
_a , key(_a , _a , _a , _a ) )
def lowerCAmelCase__ ( ):
snake_case_ : List[str] = []
for x in range(1 , 5 ):
for y in range(1 , 6 ):
some_list.append((x, y) )
for x in range(15 , 20 ):
some_list.append((x, 17) )
for x in range(10 , 19 ):
for y in range(1 , 15 ):
some_list.append((x, y) )
# L block
for x in range(1 , 4 ):
for y in range(12 , 19 ):
some_list.append((x, y) )
for x in range(3 , 13 ):
for y in range(16 , 19 ):
some_list.append((x, y) )
return some_list
lowercase : Union[str, Any] = {0: consistent_heuristic, 1: heuristic_a, 2: heuristic_a}
lowercase : str = [
(0, 1),
(1, 1),
(2, 1),
(3, 1),
(4, 1),
(5, 1),
(6, 1),
(7, 1),
(8, 1),
(9, 1),
(10, 1),
(11, 1),
(12, 1),
(13, 1),
(14, 1),
(15, 1),
(16, 1),
(17, 1),
(18, 1),
(19, 1),
]
lowercase : List[Any] = make_common_ground()
lowercase : Union[str, Any] = blocks_blk
# hyper parameters
lowercase : Dict = 1
lowercase : Tuple = 1
lowercase : str = 20
lowercase : List[str] = 3 # one consistent and two other inconsistent
# start and end destination
lowercase : Any = (0, 0)
lowercase : Union[str, Any] = (n - 1, n - 1)
lowercase : Optional[int] = 1
def lowerCAmelCase__ ( _a : TPos , _a : TPos , _a : int ):
snake_case_ : Optional[Any] = {start: 0, goal: float("inf" )}
snake_case_ : Optional[Any] = {start: -1, goal: -1}
snake_case_ : Tuple = []
snake_case_ : Union[str, Any] = set()
for i in range(_a ):
open_list.append(PriorityQueue() )
open_list[i].put(_a , key(_a , _a , _a , _a ) )
snake_case_ : list[int] = []
snake_case_ : list[int] = []
while open_list[0].minkey() < float("inf" ):
for i in range(1 , _a ):
# print(open_list[0].minkey(), open_list[i].minkey())
if open_list[i].minkey() <= Wa * open_list[0].minkey():
global t
t += 1
if g_function[goal] <= open_list[i].minkey():
if g_function[goal] < float("inf" ):
do_something(_a , _a , _a )
else:
snake_case_ , snake_case_ : str = open_list[i].top_show()
visited.add(_a )
expand_state(
_a , _a , _a , _a , _a , _a , _a , _a , )
close_list_inad.append(_a )
else:
if g_function[goal] <= open_list[0].minkey():
if g_function[goal] < float("inf" ):
do_something(_a , _a , _a )
else:
snake_case_ : Optional[Any] = open_list[0].top_show()
visited.add(_a )
expand_state(
_a , 0 , _a , _a , _a , _a , _a , _a , )
close_list_anchor.append(_a )
print("No path found to goal" )
print()
for i in range(n - 1 , -1 , -1 ):
for j in range(_a ):
if (j, i) in blocks:
print("#" , end=" " )
elif (j, i) in back_pointer:
if (j, i) == (n - 1, n - 1):
print("*" , end=" " )
else:
print("-" , end=" " )
else:
print("*" , end=" " )
if (j, i) == (n - 1, n - 1):
print("<-- End position" , end=" " )
print()
print("^" )
print("Start position" )
print()
print("# is an obstacle" )
print("- is the path taken by algorithm" )
if __name__ == "__main__":
multi_a_star(start, goal, n_heuristic)
| 568
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_rembert import RemBertTokenizer
else:
lowercase : int = None
lowercase : str = logging.get_logger(__name__)
lowercase : Any = {'''vocab_file''': '''sentencepiece.model''', '''tokenizer_file''': '''tokenizer.json'''}
lowercase : Dict = {
'''vocab_file''': {
'''google/rembert''': '''https://huggingface.co/google/rembert/resolve/main/sentencepiece.model''',
},
'''tokenizer_file''': {
'''google/rembert''': '''https://huggingface.co/google/rembert/resolve/main/tokenizer.json''',
},
}
lowercase : Dict = {
'''google/rembert''': 2_56,
}
lowercase : Dict = '''▁'''
class UpperCAmelCase_ ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
A : Union[str, Any] = VOCAB_FILES_NAMES
A : Any = PRETRAINED_VOCAB_FILES_MAP
A : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A : Optional[Any] = RemBertTokenizer
def __init__( self , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE="[CLS]" , _SCREAMING_SNAKE_CASE="[SEP]" , _SCREAMING_SNAKE_CASE="<unk>" , _SCREAMING_SNAKE_CASE="[SEP]" , _SCREAMING_SNAKE_CASE="<pad>" , _SCREAMING_SNAKE_CASE="[CLS]" , _SCREAMING_SNAKE_CASE="[MASK]" , **_SCREAMING_SNAKE_CASE , ) -> Optional[Any]:
# Mask token behave like a normal word, i.e. include the space before it
snake_case_ : Optional[int] = AddedToken(_SCREAMING_SNAKE_CASE , lstrip=_SCREAMING_SNAKE_CASE , rstrip=_SCREAMING_SNAKE_CASE ) if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) else mask_token
super().__init__(
_SCREAMING_SNAKE_CASE , tokenizer_file=_SCREAMING_SNAKE_CASE , do_lower_case=_SCREAMING_SNAKE_CASE , remove_space=_SCREAMING_SNAKE_CASE , keep_accents=_SCREAMING_SNAKE_CASE , bos_token=_SCREAMING_SNAKE_CASE , eos_token=_SCREAMING_SNAKE_CASE , unk_token=_SCREAMING_SNAKE_CASE , sep_token=_SCREAMING_SNAKE_CASE , pad_token=_SCREAMING_SNAKE_CASE , cls_token=_SCREAMING_SNAKE_CASE , mask_token=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
snake_case_ : Any = do_lower_case
snake_case_ : Dict = remove_space
snake_case_ : Optional[Any] = keep_accents
snake_case_ : Tuple = vocab_file
snake_case_ : Union[str, Any] = False if not self.vocab_file else True
def _lowerCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None ) -> List[int]:
snake_case_ : List[Any] = [self.sep_token_id]
snake_case_ : List[Any] = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
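# Example: a single sequence is wrapped as [CLS] A [SEP]; a pair becomes
# [CLS] A [SEP] B [SEP], the standard BERT-style input layout.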
def _lowerCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = False ) -> List[int]:
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
"You should not supply a second sequence if the provided sequence of "
"ids is already formatted with special tokens for the model." )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(_SCREAMING_SNAKE_CASE )) + [1] + ([0] * len(_SCREAMING_SNAKE_CASE )) + [1]
return [1] + ([0] * len(_SCREAMING_SNAKE_CASE )) + [1]
def _lowerCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None ) -> List[int]:
snake_case_ : Dict = [self.sep_token_id]
snake_case_ : Tuple = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
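# Example: the returned segment ids are 0 for the "[CLS] A [SEP]" span and 1 for the
# trailing "B [SEP]" span when a second sequence is provided.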
def _lowerCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None ) -> Tuple[str]:
if not os.path.isdir(_SCREAMING_SNAKE_CASE ):
logger.error("Vocabulary path ({}) should be a directory".format(_SCREAMING_SNAKE_CASE ) )
return
snake_case_ : str = os.path.join(
_SCREAMING_SNAKE_CASE , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_SCREAMING_SNAKE_CASE ):
copyfile(self.vocab_file , _SCREAMING_SNAKE_CASE )
return (out_vocab_file,)
| 568
| 1
|
'''simple docstring'''
_lowercase = """
# Transformers 설치 방법
! pip install transformers datasets
# 마지막 릴리스 대신 소스에서 설치하려면, 위 명령을 주석으로 바꾸고 아래 명령을 해제하세요.
# ! pip install git+https://github.com/huggingface/transformers.git
"""
_lowercase = [{"""type""": """code""", """content""": INSTALL_CONTENT}]
_lowercase = {
"""{processor_class}""": """FakeProcessorClass""",
"""{model_class}""": """FakeModelClass""",
"""{object_class}""": """FakeObjectClass""",
}
| 427
|
'''simple docstring'''
import json
import os
import tempfile
from unittest.mock import patch
import torch
from torch.utils.data import DataLoader, TensorDataset
from accelerate import DistributedType, infer_auto_device_map, init_empty_weights
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState, PartialState
from accelerate.test_utils import require_bnb, require_multi_gpu, slow
from accelerate.test_utils.testing import AccelerateTestCase, require_cuda
from accelerate.utils import patch_environment
def lowerCamelCase__ ( ):
__snake_case = torch.nn.Linear(2 , 4 )
__snake_case = torch.optim.AdamW(model.parameters() , lr=1.0 )
__snake_case = torch.optim.lr_scheduler.OneCycleLR(a , max_lr=0.01 , steps_per_epoch=2 , epochs=1 )
__snake_case = DataLoader(TensorDataset(torch.tensor([1, 2, 3] ) ) )
__snake_case = DataLoader(TensorDataset(torch.tensor([4, 5, 6] ) ) )
return model, optimizer, scheduler, train_dl, valid_dl
def lowerCamelCase__ ( a ):
return (model.weight.abs().sum() + model.bias.abs().sum()).item()
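# The "signature" above is a cheap scalar fingerprint of the weights (the sum of the
# absolute values of the linear layer's weight and bias); the tests below compare it
# before and after save/load round-trips to confirm checkpoints actually restore the
# model parameters.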
def lowerCamelCase__ ( a ):
__snake_case = torch.nn.Linear(*tuple(model.weight.T.shape ) ).state_dict()
model.load_state_dict(a )
class a_ ( UpperCAmelCase__ ):
@require_cuda
def lowercase__ ( self : List[str] ):
__snake_case = Accelerator()
assert PartialState._shared_state["_cpu"] is False
assert PartialState._shared_state["device"].type == "cuda"
with self.assertRaises(__lowerCAmelCase ):
__snake_case = Accelerator(cpu=__lowerCAmelCase )
def lowercase__ ( self : Optional[int] ):
__snake_case = Accelerator()
__snake_case = GradientState()
assert state.num_steps == 1
__snake_case = 4
assert state.num_steps == 4
assert state.sync_gradients is True
__snake_case = False
assert state.sync_gradients is False
GradientState._reset_state()
def lowercase__ ( self : Optional[Any] ):
__snake_case = Accelerator()
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case = create_components()
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case = accelerator.prepare(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
self.assertTrue(prepared_model in accelerator._models )
self.assertTrue(prepared_optimizer in accelerator._optimizers )
self.assertTrue(prepared_scheduler in accelerator._schedulers )
self.assertTrue(prepared_train_dl in accelerator._dataloaders )
self.assertTrue(prepared_valid_dl in accelerator._dataloaders )
def lowercase__ ( self : Optional[int] ):
__snake_case = Accelerator()
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case = create_components()
accelerator.prepare(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
accelerator.free_memory()
self.assertTrue(len(accelerator._models ) == 0 )
self.assertTrue(len(accelerator._optimizers ) == 0 )
self.assertTrue(len(accelerator._schedulers ) == 0 )
self.assertTrue(len(accelerator._dataloaders ) == 0 )
def lowercase__ ( self : Union[str, Any] ):
PartialState._reset_state()
# Mock torch.cuda.set_device to avoid an exception as the device doesn't exist
def noop(*__lowerCAmelCase : List[str] , **__lowerCAmelCase : Any ):
pass
with patch('torch.cuda.set_device' , __lowerCAmelCase ), patch_environment(ACCELERATE_TORCH_DEVICE='cuda:64' ):
__snake_case = Accelerator()
self.assertEqual(str(accelerator.state.device ) , 'cuda:64' )
def lowercase__ ( self : List[str] ):
__snake_case = Accelerator()
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case = create_components()
accelerator.prepare(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
__snake_case = get_signature(__lowerCAmelCase )
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(__lowerCAmelCase )
# make sure random weights don't match
load_random_weights(__lowerCAmelCase )
self.assertTrue(abs(model_signature - get_signature(__lowerCAmelCase ) ) > 1E-3 )
# make sure loaded weights match
accelerator.load_state(__lowerCAmelCase )
self.assertTrue(abs(model_signature - get_signature(__lowerCAmelCase ) ) < 1E-3 )
def lowercase__ ( self : List[str] ):
__snake_case = Accelerator()
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case = create_components()
accelerator.prepare(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
__snake_case = get_signature(__lowerCAmelCase )
# saving hook
def save_config(__lowerCAmelCase : int , __lowerCAmelCase : Any , __lowerCAmelCase : str ):
__snake_case = {'class_name': models[0].__class__.__name__}
with open(os.path.join(__lowerCAmelCase , 'data.json' ) , 'w' ) as f:
json.dump(__lowerCAmelCase , __lowerCAmelCase )
# loading hook
def load_config(__lowerCAmelCase : int , __lowerCAmelCase : str ):
with open(os.path.join(__lowerCAmelCase , 'data.json' ) , 'r' ) as f:
__snake_case = json.load(__lowerCAmelCase )
__snake_case = config['class_name']
__snake_case = accelerator.register_save_state_pre_hook(__lowerCAmelCase )
__snake_case = accelerator.register_load_state_pre_hook(__lowerCAmelCase )
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(__lowerCAmelCase )
# make sure random weights don't match with hooks
load_random_weights(__lowerCAmelCase )
self.assertTrue(abs(model_signature - get_signature(__lowerCAmelCase ) ) > 1E-3 )
# random class name to verify correct one is loaded
__snake_case = 'random'
# make sure loaded weights match with hooks
accelerator.load_state(__lowerCAmelCase )
self.assertTrue(abs(model_signature - get_signature(__lowerCAmelCase ) ) < 1E-3 )
# model.class_name is loaded from config
self.assertTrue(model.class_name == model.__class__.__name__ )
# remove hooks
save_hook.remove()
load_hook.remove()
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(__lowerCAmelCase )
# make sure random weights don't match with hooks removed
load_random_weights(__lowerCAmelCase )
self.assertTrue(abs(model_signature - get_signature(__lowerCAmelCase ) ) > 1E-3 )
# random class name to verify correct one is loaded
__snake_case = 'random'
# make sure loaded weights match with hooks removed
accelerator.load_state(__lowerCAmelCase )
self.assertTrue(abs(model_signature - get_signature(__lowerCAmelCase ) ) < 1E-3 )
# model.class_name is NOT loaded from config
self.assertTrue(model.class_name != model.__class__.__name__ )
def lowercase__ ( self : Union[str, Any] ):
__snake_case = Accelerator()
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case = create_components()
__snake_case = None
# This should work
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case = accelerator.prepare(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
self.assertTrue(dummy_obj is None )
def lowercase__ ( self : List[str] ):
__snake_case = Accelerator()
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case = create_components()
__snake_case = [1, 2, 3]
# This should work
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case = accelerator.prepare(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
self.assertEqual(
getattr(__lowerCAmelCase , '_is_accelerate_prepared' , __lowerCAmelCase ) , __lowerCAmelCase , 'Dummy object should have `_is_accelerate_prepared` set to `True`' , )
self.assertEqual(
getattr(__lowerCAmelCase , '_is_accelerate_prepared' , __lowerCAmelCase ) , __lowerCAmelCase , 'Model is missing `_is_accelerate_prepared` or is set to `False`' , )
self.assertEqual(
getattr(__lowerCAmelCase , '_is_accelerate_prepared' , __lowerCAmelCase ) , __lowerCAmelCase , 'Optimizer is missing `_is_accelerate_prepared` or is set to `False`' , )
self.assertEqual(
getattr(__lowerCAmelCase , '_is_accelerate_prepared' , __lowerCAmelCase ) , __lowerCAmelCase , 'Scheduler is missing `_is_accelerate_prepared` or is set to `False`' , )
self.assertEqual(
getattr(__lowerCAmelCase , '_is_accelerate_prepared' , __lowerCAmelCase ) , __lowerCAmelCase , 'Train Dataloader is missing `_is_accelerate_prepared` or is set to `False`' , )
self.assertEqual(
getattr(__lowerCAmelCase , '_is_accelerate_prepared' , __lowerCAmelCase ) , __lowerCAmelCase , 'Valid Dataloader is missing `_is_accelerate_prepared` or is set to `False`' , )
@slow
@require_bnb
def lowercase__ ( self : Optional[int] ):
from transformers import AutoModelForCausalLM
__snake_case = AutoModelForCausalLM.from_pretrained(
'EleutherAI/gpt-neo-125m' , load_in_abit=__lowerCAmelCase , device_map={'': 0} , )
__snake_case = Accelerator()
# This should work
__snake_case = accelerator.prepare(__lowerCAmelCase )
@slow
@require_bnb
def lowercase__ ( self : List[Any] ):
from transformers import AutoModelForCausalLM
__snake_case = Accelerator()
with init_empty_weights():
__snake_case = AutoModelForCausalLM.from_pretrained(
'EleutherAI/gpt-neo-125m' , )
model.tie_weights()
__snake_case = infer_auto_device_map(__lowerCAmelCase )
__snake_case = 'cpu'
__snake_case = AutoModelForCausalLM.from_pretrained(
'EleutherAI/gpt-neo-125m' , device_map=__lowerCAmelCase , load_in_abit=__lowerCAmelCase , llm_inta_enable_fpaa_cpu_offload=__lowerCAmelCase )
# This should not work and get value error
with self.assertRaises(__lowerCAmelCase ):
__snake_case = accelerator.prepare(__lowerCAmelCase )
@slow
@require_bnb
@require_multi_gpu
def lowercase__ ( self : Any ):
from transformers import AutoModelForCausalLM
__snake_case = {'distributed_type': DistributedType.MULTI_GPU}
with init_empty_weights():
__snake_case = AutoModelForCausalLM.from_pretrained(
'EleutherAI/gpt-neo-125m' , )
model.tie_weights()
__snake_case = infer_auto_device_map(__lowerCAmelCase )
__snake_case = 1
__snake_case = AutoModelForCausalLM.from_pretrained(
'EleutherAI/gpt-neo-125m' , load_in_abit=__lowerCAmelCase , device_map=__lowerCAmelCase , )
__snake_case = Accelerator()
# This should not work and get value error
with self.assertRaises(__lowerCAmelCase ):
__snake_case = accelerator.prepare(__lowerCAmelCase )
PartialState._reset_state()
@slow
@require_bnb
@require_multi_gpu
def lowercase__ ( self : Any ):
from transformers import AutoModelForCausalLM
with init_empty_weights():
__snake_case = AutoModelForCausalLM.from_pretrained(
'EleutherAI/gpt-neo-125m' , )
__snake_case = infer_auto_device_map(__lowerCAmelCase )
__snake_case = 1
__snake_case = AutoModelForCausalLM.from_pretrained(
'EleutherAI/gpt-neo-125m' , load_in_abit=__lowerCAmelCase , device_map=__lowerCAmelCase , )
__snake_case = Accelerator()
# This should work
__snake_case = accelerator.prepare(__lowerCAmelCase )
@require_cuda
def lowercase__ ( self : str ):
__snake_case = torch.nn.Linear(1_0 , 1_0 )
__snake_case = torch.optim.SGD(model.parameters() , lr=0.01 )
__snake_case = Accelerator(cpu=__lowerCAmelCase )
__snake_case = accelerator.prepare(__lowerCAmelCase )
| 427
| 1
|
'''simple docstring'''
import argparse
import os
import torch
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNetaDModel,
)
lowercase__ = {
'''sample_size''': 32,
'''in_channels''': 3,
'''out_channels''': 3,
'''layers_per_block''': 2,
'''num_class_embeds''': 10_00,
'''block_out_channels''': [32, 64],
'''attention_head_dim''': 8,
'''down_block_types''': [
'''ResnetDownsampleBlock2D''',
'''AttnDownBlock2D''',
],
'''up_block_types''': [
'''AttnUpBlock2D''',
'''ResnetUpsampleBlock2D''',
],
'''resnet_time_scale_shift''': '''scale_shift''',
'''upsample_type''': '''resnet''',
'''downsample_type''': '''resnet''',
}
lowercase__ = {
'''sample_size''': 64,
'''in_channels''': 3,
'''out_channels''': 3,
'''layers_per_block''': 3,
'''num_class_embeds''': 10_00,
'''block_out_channels''': [1_92, 1_92 * 2, 1_92 * 3, 1_92 * 4],
'''attention_head_dim''': 64,
'''down_block_types''': [
'''ResnetDownsampleBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
],
'''up_block_types''': [
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''ResnetUpsampleBlock2D''',
],
'''resnet_time_scale_shift''': '''scale_shift''',
'''upsample_type''': '''resnet''',
'''downsample_type''': '''resnet''',
}
lowercase__ = {
'''sample_size''': 2_56,
'''in_channels''': 3,
'''out_channels''': 3,
'''layers_per_block''': 2,
'''num_class_embeds''': None,
'''block_out_channels''': [2_56, 2_56, 2_56 * 2, 2_56 * 2, 2_56 * 4, 2_56 * 4],
'''attention_head_dim''': 64,
'''down_block_types''': [
'''ResnetDownsampleBlock2D''',
'''ResnetDownsampleBlock2D''',
'''ResnetDownsampleBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
],
'''up_block_types''': [
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''ResnetUpsampleBlock2D''',
'''ResnetUpsampleBlock2D''',
'''ResnetUpsampleBlock2D''',
],
'''resnet_time_scale_shift''': '''default''',
'''upsample_type''': '''resnet''',
'''downsample_type''': '''resnet''',
}
lowercase__ = {
'''num_train_timesteps''': 40,
'''sigma_min''': 0.002,
'''sigma_max''': 80.0,
}
lowercase__ = {
'''num_train_timesteps''': 2_01,
'''sigma_min''': 0.002,
'''sigma_max''': 80.0,
}
lowercase__ = {
'''num_train_timesteps''': 1_51,
'''sigma_min''': 0.002,
'''sigma_max''': 80.0,
}
def __snake_case ( lowercase : int ):
if isinstance(lowercase , lowercase ):
return v
if v.lower() in ("yes", "true", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise argparse.ArgumentTypeError("boolean value expected" )
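# Example: strabool("yes"), strabool("true") and strabool("1") return True;
# strabool("no") returns False; any other string raises argparse.ArgumentTypeError.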
def __snake_case ( lowercase : List[Any] , lowercase : List[Any] , lowercase : Any , lowercase : int , lowercase : Optional[int]=False ):
snake_case_ = checkpoint[f'''{old_prefix}.in_layers.0.weight''']
snake_case_ = checkpoint[f'''{old_prefix}.in_layers.0.bias''']
snake_case_ = checkpoint[f'''{old_prefix}.in_layers.2.weight''']
snake_case_ = checkpoint[f'''{old_prefix}.in_layers.2.bias''']
snake_case_ = checkpoint[f'''{old_prefix}.emb_layers.1.weight''']
snake_case_ = checkpoint[f'''{old_prefix}.emb_layers.1.bias''']
snake_case_ = checkpoint[f'''{old_prefix}.out_layers.0.weight''']
snake_case_ = checkpoint[f'''{old_prefix}.out_layers.0.bias''']
snake_case_ = checkpoint[f'''{old_prefix}.out_layers.3.weight''']
snake_case_ = checkpoint[f'''{old_prefix}.out_layers.3.bias''']
if has_skip:
snake_case_ = checkpoint[f'''{old_prefix}.skip_connection.weight''']
snake_case_ = checkpoint[f'''{old_prefix}.skip_connection.bias''']
return new_checkpoint
def __snake_case ( lowercase : Union[str, Any] , lowercase : Optional[Any] , lowercase : Any , lowercase : Union[str, Any] , lowercase : Dict=None ):
snake_case_ , snake_case_ , snake_case_ = checkpoint[f'''{old_prefix}.qkv.weight'''].chunk(3 , dim=0 )
snake_case_ , snake_case_ , snake_case_ = checkpoint[f'''{old_prefix}.qkv.bias'''].chunk(3 , dim=0 )
snake_case_ = checkpoint[f'''{old_prefix}.norm.weight''']
snake_case_ = checkpoint[f'''{old_prefix}.norm.bias''']
snake_case_ = weight_q.squeeze(-1 ).squeeze(-1 )
snake_case_ = bias_q.squeeze(-1 ).squeeze(-1 )
snake_case_ = weight_k.squeeze(-1 ).squeeze(-1 )
snake_case_ = bias_k.squeeze(-1 ).squeeze(-1 )
snake_case_ = weight_v.squeeze(-1 ).squeeze(-1 )
snake_case_ = bias_v.squeeze(-1 ).squeeze(-1 )
snake_case_ = (
checkpoint[f'''{old_prefix}.proj_out.weight'''].squeeze(-1 ).squeeze(-1 )
)
snake_case_ = checkpoint[f'''{old_prefix}.proj_out.bias'''].squeeze(-1 ).squeeze(-1 )
return new_checkpoint
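# The .squeeze(-1) calls above drop the trailing 1x1 spatial dimensions so that the
# original convolutional QKV and output projections load as plain linear layers in the
# diffusers attention block; the fused qkv tensor is first chunked into q, k, v thirds.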
def __snake_case ( lowercase : str , lowercase : Optional[int] ):
snake_case_ = torch.load(lowercase , map_location="cpu" )
snake_case_ = {}
snake_case_ = checkpoint["time_embed.0.weight"]
snake_case_ = checkpoint["time_embed.0.bias"]
snake_case_ = checkpoint["time_embed.2.weight"]
snake_case_ = checkpoint["time_embed.2.bias"]
if unet_config["num_class_embeds"] is not None:
snake_case_ = checkpoint["label_emb.weight"]
snake_case_ = checkpoint["input_blocks.0.0.weight"]
snake_case_ = checkpoint["input_blocks.0.0.bias"]
snake_case_ = unet_config["down_block_types"]
snake_case_ = unet_config["layers_per_block"]
snake_case_ = unet_config["attention_head_dim"]
snake_case_ = unet_config["block_out_channels"]
snake_case_ = 1
snake_case_ = channels_list[0]
for i, layer_type in enumerate(lowercase ):
snake_case_ = channels_list[i]
snake_case_ = current_channels != prev_channels
if layer_type == "ResnetDownsampleBlock2D":
for j in range(lowercase ):
snake_case_ = f'''down_blocks.{i}.resnets.{j}'''
snake_case_ = f'''input_blocks.{current_layer}.0'''
snake_case_ = True if j == 0 and downsample_block_has_skip else False
snake_case_ = convert_resnet(lowercase , lowercase , lowercase , lowercase , has_skip=lowercase )
current_layer += 1
elif layer_type == "AttnDownBlock2D":
for j in range(lowercase ):
snake_case_ = f'''down_blocks.{i}.resnets.{j}'''
snake_case_ = f'''input_blocks.{current_layer}.0'''
snake_case_ = True if j == 0 and downsample_block_has_skip else False
snake_case_ = convert_resnet(lowercase , lowercase , lowercase , lowercase , has_skip=lowercase )
snake_case_ = f'''down_blocks.{i}.attentions.{j}'''
snake_case_ = f'''input_blocks.{current_layer}.1'''
snake_case_ = convert_attention(
lowercase , lowercase , lowercase , lowercase , lowercase )
current_layer += 1
if i != len(lowercase ) - 1:
snake_case_ = f'''down_blocks.{i}.downsamplers.0'''
snake_case_ = f'''input_blocks.{current_layer}.0'''
snake_case_ = convert_resnet(lowercase , lowercase , lowercase , lowercase )
current_layer += 1
snake_case_ = current_channels
# hardcoded the mid-block for now
snake_case_ = "mid_block.resnets.0"
snake_case_ = "middle_block.0"
snake_case_ = convert_resnet(lowercase , lowercase , lowercase , lowercase )
snake_case_ = "mid_block.attentions.0"
snake_case_ = "middle_block.1"
snake_case_ = convert_attention(lowercase , lowercase , lowercase , lowercase , lowercase )
snake_case_ = "mid_block.resnets.1"
snake_case_ = "middle_block.2"
snake_case_ = convert_resnet(lowercase , lowercase , lowercase , lowercase )
snake_case_ = 0
snake_case_ = unet_config["up_block_types"]
for i, layer_type in enumerate(lowercase ):
if layer_type == "ResnetUpsampleBlock2D":
for j in range(layers_per_block + 1 ):
snake_case_ = f'''up_blocks.{i}.resnets.{j}'''
snake_case_ = f'''output_blocks.{current_layer}.0'''
snake_case_ = convert_resnet(lowercase , lowercase , lowercase , lowercase , has_skip=lowercase )
current_layer += 1
if i != len(lowercase ) - 1:
snake_case_ = f'''up_blocks.{i}.upsamplers.0'''
snake_case_ = f'''output_blocks.{current_layer-1}.1'''
snake_case_ = convert_resnet(lowercase , lowercase , lowercase , lowercase )
elif layer_type == "AttnUpBlock2D":
for j in range(layers_per_block + 1 ):
snake_case_ = f'''up_blocks.{i}.resnets.{j}'''
snake_case_ = f'''output_blocks.{current_layer}.0'''
snake_case_ = convert_resnet(lowercase , lowercase , lowercase , lowercase , has_skip=lowercase )
snake_case_ = f'''up_blocks.{i}.attentions.{j}'''
snake_case_ = f'''output_blocks.{current_layer}.1'''
snake_case_ = convert_attention(
lowercase , lowercase , lowercase , lowercase , lowercase )
current_layer += 1
if i != len(lowercase ) - 1:
snake_case_ = f'''up_blocks.{i}.upsamplers.0'''
snake_case_ = f'''output_blocks.{current_layer-1}.2'''
snake_case_ = convert_resnet(lowercase , lowercase , lowercase , lowercase )
snake_case_ = checkpoint["out.0.weight"]
snake_case_ = checkpoint["out.0.bias"]
snake_case_ = checkpoint["out.2.weight"]
snake_case_ = checkpoint["out.2.bias"]
return new_checkpoint
if __name__ == "__main__":
lowercase__ = argparse.ArgumentParser()
parser.add_argument('''--unet_path''', default=None, type=str, required=True, help='''Path to the unet.pt to convert.''')
parser.add_argument(
'''--dump_path''', default=None, type=str, required=True, help='''Path to output the converted UNet model.'''
)
parser.add_argument('''--class_cond''', default=True, type=str, help='''Whether the model is class-conditional.''')
lowercase__ = parser.parse_args()
lowercase__ = strabool(args.class_cond)
lowercase__ = os.path.basename(args.unet_path)
print(f"""Checkpoint: {ckpt_name}""")
# Get U-Net config
if "imagenet64" in ckpt_name:
lowercase__ = IMAGENET_64_UNET_CONFIG
elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
lowercase__ = LSUN_256_UNET_CONFIG
elif "test" in ckpt_name:
lowercase__ = TEST_UNET_CONFIG
else:
raise ValueError(f"""Checkpoint type {ckpt_name} is not currently supported.""")
if not args.class_cond:
lowercase__ = None
lowercase__ = con_pt_to_diffuser(args.unet_path, unet_config)
lowercase__ = UNetaDModel(**unet_config)
image_unet.load_state_dict(converted_unet_ckpt)
# Get scheduler config
if "cd" in ckpt_name or "test" in ckpt_name:
lowercase__ = CD_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "imagenet64" in ckpt_name:
lowercase__ = CT_IMAGENET_64_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
lowercase__ = CT_LSUN_256_SCHEDULER_CONFIG
else:
raise ValueError(f"""Checkpoint type {ckpt_name} is not currently supported.""")
lowercase__ = CMStochasticIterativeScheduler(**scheduler_config)
lowercase__ = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler)
consistency_model.save_pretrained(args.dump_path)
| 508
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
lowercase__ = {
'''configuration_efficientformer''': [
'''EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''EfficientFormerConfig''',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ = ['''EfficientFormerImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ = [
'''EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''EfficientFormerForImageClassification''',
'''EfficientFormerForImageClassificationWithTeacher''',
'''EfficientFormerModel''',
'''EfficientFormerPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ = [
'''TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFEfficientFormerForImageClassification''',
'''TFEfficientFormerForImageClassificationWithTeacher''',
'''TFEfficientFormerModel''',
'''TFEfficientFormerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientformer import EfficientFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientformer import (
EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientFormerForImageClassification,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerModel,
EfficientFormerPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
TFEfficientFormerPreTrainedModel,
)
else:
import sys
lowercase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 508
| 1
|
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
"""microsoft/unispeech-large-1500h-cv""": (
"""https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json"""
),
# See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class a__ ( snake_case ):
"""simple docstring"""
__lowerCamelCase = 'unispeech'
def __init__( self , lowercase=32 , lowercase=768 , lowercase=12 , lowercase=12 , lowercase=3072 , lowercase="gelu" , lowercase=0.1 , lowercase=0.1 , lowercase=0.1 , lowercase=0.0 , lowercase=0.0 , lowercase=0.1 , lowercase=0.1 , lowercase=0.02 , lowercase=1e-5 , lowercase="group" , lowercase="gelu" , lowercase=(512, 512, 512, 512, 512, 512, 512) , lowercase=(5, 2, 2, 2, 2, 2, 2) , lowercase=(10, 3, 3, 3, 3, 2, 2) , lowercase=False , lowercase=128 , lowercase=16 , lowercase=False , lowercase=True , lowercase=0.05 , lowercase=10 , lowercase=2 , lowercase=0.0 , lowercase=10 , lowercase=0 , lowercase=320 , lowercase=2 , lowercase=0.1 , lowercase=100 , lowercase=256 , lowercase=256 , lowercase=0.1 , lowercase="mean" , lowercase=False , lowercase=False , lowercase=256 , lowercase=80 , lowercase=0 , lowercase=1 , lowercase=2 , lowercase=0.5 , **lowercase , ) -> Tuple:
'''simple docstring'''
super().__init__(**lowercase , pad_token_id=lowercase , bos_token_id=lowercase , eos_token_id=lowercase )
A__ = hidden_size
A__ = feat_extract_norm
A__ = feat_extract_activation
A__ = list(lowercase )
A__ = list(lowercase )
A__ = list(lowercase )
A__ = conv_bias
A__ = num_conv_pos_embeddings
A__ = num_conv_pos_embedding_groups
A__ = len(self.conv_dim )
A__ = num_hidden_layers
A__ = intermediate_size
A__ = hidden_act
A__ = num_attention_heads
A__ = hidden_dropout
A__ = attention_dropout
A__ = activation_dropout
A__ = feat_proj_dropout
A__ = final_dropout
A__ = layerdrop
A__ = layer_norm_eps
A__ = initializer_range
A__ = num_ctc_classes
A__ = vocab_size
A__ = do_stable_layer_norm
A__ = use_weighted_layer_sum
A__ = classifier_proj_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
F' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'
F' `len(config.conv_kernel) = {len(self.conv_kernel )}`.' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
A__ = apply_spec_augment
A__ = mask_time_prob
A__ = mask_time_length
A__ = mask_time_min_masks
A__ = mask_feature_prob
A__ = mask_feature_length
A__ = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
A__ = num_codevectors_per_group
A__ = num_codevector_groups
A__ = contrastive_logits_temperature
A__ = feat_quantizer_dropout
A__ = num_negatives
A__ = codevector_dim
A__ = proj_codevector_dim
A__ = diversity_loss_weight
# ctc loss
A__ = ctc_loss_reduction
A__ = ctc_zero_infinity
# pretraining loss
A__ = replace_prob
@property
def UpperCamelCase ( self ) -> str:
'''simple docstring'''
return functools.reduce(operator.mul , self.conv_stride , 1 )
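# Example: with the default conv_stride of (5, 2, 2, 2, 2, 2, 2) this property
# evaluates to 5 * 2**6 = 320, the overall downsampling ratio of the convolutional
# feature encoder.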
| 626
|
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.31.0""")
lowerCAmelCase__ = logging.getLogger(__name__)
@dataclass
class a__ :
"""simple docstring"""
__lowerCamelCase = field(
metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
__lowerCamelCase = field(
default=snake_case , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
__lowerCamelCase = field(
default=snake_case , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
__lowerCamelCase = field(
default=snake_case , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
__lowerCamelCase = field(
 default=snake_case , metadata={'help': 'Whether to use one of the fast tokenizers (backed by the tokenizers library) or not.'} , )
__lowerCamelCase = field(
default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
__lowerCamelCase = field(
default=snake_case , metadata={
'help': (
'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
'with private models).'
)
} , )
@dataclass
class a__ :
"""simple docstring"""
__lowerCamelCase = field(default=snake_case , metadata={'help': 'The input training data file (a text file).'} )
__lowerCamelCase = field(
default=snake_case , metadata={'help': 'An optional input evaluation data file to evaluate the perplexity on (a text file).'} , )
__lowerCamelCase = field(
default=snake_case , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
__lowerCamelCase = field(
default=snake_case , metadata={'help': 'The number of processes to use for the preprocessing.'} , )
__lowerCamelCase = field(
default=snake_case , metadata={
'help': (
'The maximum total input sequence length after tokenization. If passed, sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
__lowerCamelCase = field(
default=snake_case , metadata={
'help': (
'Whether to pad all samples to the maximum sentence length. '
'If False, will pad the samples dynamically when batching to the maximum length in the batch. More '
'efficient on GPU but very bad for TPU.'
)
} , )
__lowerCamelCase = field(
default=snake_case , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of training examples to this '
'value if set.'
)
} , )
__lowerCamelCase = field(
default=snake_case , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
'value if set.'
)
} , )
def UpperCamelCase ( self ) -> Optional[Any]:
'''simple docstring'''
if self.train_file is not None:
A__ = self.train_file.split("." )[-1]
assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
if self.validation_file is not None:
A__ = self.validation_file.split("." )[-1]
assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class a__ :
"""simple docstring"""
__lowerCamelCase = 42
__lowerCamelCase = True
__lowerCamelCase = None
__lowerCamelCase = None
def __call__( self , lowercase ) -> Tuple:
'''simple docstring'''
A__ = "label" if "label" in features[0].keys() else "labels"
A__ = [feature.pop(lowercase ) for feature in features]
A__ = len(lowercase )
A__ = len(features[0]["input_ids"] )
A__ = [
[{k: v[i] for k, v in feature.items()} for i in range(lowercase )] for feature in features
]
A__ = list(chain(*lowercase ) )
A__ = self.tokenizer.pad(
lowercase , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="pt" , )
# Un-flatten
A__ = {k: v.view(lowercase , lowercase , -1 ) for k, v in batch.items()}
# Add back labels
A__ = torch.tensor(lowercase , dtype=torch.intaa )
return batch
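# Hedged, illustrative sketch of the collator above, kept as comments so the
# script's behavior is unchanged: N examples x 4 choices are flattened into
# 4*N sequences, padded together, then reshaped back to (N, 4, seq_len).
# >>> import torch
# >>> flat = torch.zeros(2 * 4, 16)   # 2 examples, 4 choices, padded length 16
# >>> flat.view(2, 4, -1).shape
# torch.Size([2, 4, 16])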
def lowerCAmelCase__ ( ) -> List[Any]:
'''simple docstring'''
A__ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
A__ , A__ , A__ = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
A__ , A__ , A__ = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("run_swag" , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
A__ = training_args.get_process_log_level()
logger.setLevel(SCREAMING_SNAKE_CASE_ )
datasets.utils.logging.set_verbosity(SCREAMING_SNAKE_CASE_ )
transformers.utils.logging.set_verbosity(SCREAMING_SNAKE_CASE_ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'
 + F', distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}' )
logger.info(F'Training/evaluation parameters {training_args}' )
# Detecting last checkpoint.
A__ = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
A__ = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'Output directory ({training_args.output_dir}) already exists and is not empty. '
"Use --overwrite_output_dir to overcome." )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
 # In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
if data_args.train_file is not None or data_args.validation_file is not None:
A__ = {}
if data_args.train_file is not None:
A__ = data_args.train_file
if data_args.validation_file is not None:
A__ = data_args.validation_file
 A__ = (data_args.train_file if data_args.train_file is not None else data_args.validation_file).split("." )[-1]
A__ = load_dataset(
SCREAMING_SNAKE_CASE_ , data_files=SCREAMING_SNAKE_CASE_ , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
else:
# Downloading and loading the swag dataset from the hub.
A__ = load_dataset(
"swag" , "regular" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
A__ = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
A__ = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
A__ = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=SCREAMING_SNAKE_CASE_ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# When using your own dataset or a different dataset from swag, you will probably need to change this.
A__ = [F'ending{i}' for i in range(4 )]
A__ = "sent1"
A__ = "sent2"
if data_args.max_seq_length is None:
A__ = tokenizer.model_max_length
if max_seq_length > 1_0_2_4:
logger.warning(
"The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value"
" of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can"
" override this default with `--block_size xxx`." )
A__ = 1_0_2_4
else:
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
F'The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the'
F'model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.' )
A__ = min(data_args.max_seq_length , tokenizer.model_max_length )
# Preprocessing the datasets.
def preprocess_function(SCREAMING_SNAKE_CASE_: Optional[Any] ):
A__ = [[context] * 4 for context in examples[context_name]]
A__ = examples[question_header_name]
A__ = [
[F'{header} {examples[end][i]}' for end in ending_names] for i, header in enumerate(SCREAMING_SNAKE_CASE_ )
]
# Flatten out
A__ = list(chain(*SCREAMING_SNAKE_CASE_ ) )
A__ = list(chain(*SCREAMING_SNAKE_CASE_ ) )
# Tokenize
A__ = tokenizer(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ , padding="max_length" if data_args.pad_to_max_length else False , )
# Un-flatten
return {k: [v[i : i + 4] for i in range(0 , len(SCREAMING_SNAKE_CASE_ ) , 4 )] for k, v in tokenized_examples.items()}
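    # Hedged, illustrative check of the "un-flatten" step above (toy values):
    # a flat list of 4*N tokenized items regroups into N examples of 4 choices.
    # >>> flat = list(range(8))
    # >>> [flat[i : i + 4] for i in range(0, len(flat), 4)]
    # [[0, 1, 2, 3], [4, 5, 6, 7]]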
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError("--do_train requires a train dataset" )
A__ = raw_datasets["train"]
if data_args.max_train_samples is not None:
A__ = min(len(SCREAMING_SNAKE_CASE_ ) , data_args.max_train_samples )
A__ = train_dataset.select(range(SCREAMING_SNAKE_CASE_ ) )
with training_args.main_process_first(desc="train dataset map pre-processing" ):
A__ = train_dataset.map(
SCREAMING_SNAKE_CASE_ , batched=SCREAMING_SNAKE_CASE_ , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
if training_args.do_eval:
if "validation" not in raw_datasets:
raise ValueError("--do_eval requires a validation dataset" )
A__ = raw_datasets["validation"]
if data_args.max_eval_samples is not None:
A__ = min(len(SCREAMING_SNAKE_CASE_ ) , data_args.max_eval_samples )
A__ = eval_dataset.select(range(SCREAMING_SNAKE_CASE_ ) )
with training_args.main_process_first(desc="validation dataset map pre-processing" ):
A__ = eval_dataset.map(
SCREAMING_SNAKE_CASE_ , batched=SCREAMING_SNAKE_CASE_ , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
# Data collator
A__ = (
default_data_collator
if data_args.pad_to_max_length
else DataCollatorForMultipleChoice(tokenizer=SCREAMING_SNAKE_CASE_ , pad_to_multiple_of=8 if training_args.fpaa else None )
)
# Metric
def compute_metrics(SCREAMING_SNAKE_CASE_: str ):
A__ , A__ = eval_predictions
A__ = np.argmax(SCREAMING_SNAKE_CASE_ , axis=1 )
return {"accuracy": (preds == label_ids).astype(np.floataa ).mean().item()}
# Initialize our Trainer
A__ = Trainer(
model=SCREAMING_SNAKE_CASE_ , args=SCREAMING_SNAKE_CASE_ , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=SCREAMING_SNAKE_CASE_ , data_collator=SCREAMING_SNAKE_CASE_ , compute_metrics=SCREAMING_SNAKE_CASE_ , )
# Training
if training_args.do_train:
A__ = None
if training_args.resume_from_checkpoint is not None:
A__ = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
A__ = last_checkpoint
A__ = trainer.train(resume_from_checkpoint=SCREAMING_SNAKE_CASE_ )
trainer.save_model() # Saves the tokenizer too for easy upload
A__ = train_result.metrics
A__ = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(SCREAMING_SNAKE_CASE_ )
)
A__ = min(SCREAMING_SNAKE_CASE_ , len(SCREAMING_SNAKE_CASE_ ) )
trainer.log_metrics("train" , SCREAMING_SNAKE_CASE_ )
trainer.save_metrics("train" , SCREAMING_SNAKE_CASE_ )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info("*** Evaluate ***" )
A__ = trainer.evaluate()
A__ = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(SCREAMING_SNAKE_CASE_ )
A__ = min(SCREAMING_SNAKE_CASE_ , len(SCREAMING_SNAKE_CASE_ ) )
trainer.log_metrics("eval" , SCREAMING_SNAKE_CASE_ )
trainer.save_metrics("eval" , SCREAMING_SNAKE_CASE_ )
A__ = {
"finetuned_from": model_args.model_name_or_path,
"tasks": "multiple-choice",
"dataset_tags": "swag",
"dataset_args": "regular",
"dataset": "SWAG",
"language": "en",
}
if training_args.push_to_hub:
trainer.push_to_hub(**SCREAMING_SNAKE_CASE_ )
else:
trainer.create_model_card(**SCREAMING_SNAKE_CASE_ )
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: Optional[int] ) -> Dict:
'''simple docstring'''
main()
if __name__ == "__main__":
main()
| 626
| 1
|
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A = logging.get_logger(__name__)
__A = {
'''microsoft/git-base''': '''https://huggingface.co/microsoft/git-base/resolve/main/config.json''',
}
class lowercase ( snake_case__):
"""simple docstring"""
a__ : Any = "git_vision_model"
def __init__( self : str , __UpperCAmelCase : Any=768 , __UpperCAmelCase : Optional[int]=3_072 , __UpperCAmelCase : int=12 , __UpperCAmelCase : List[str]=12 , __UpperCAmelCase : Any=3 , __UpperCAmelCase : int=224 , __UpperCAmelCase : Optional[Any]=16 , __UpperCAmelCase : int="quick_gelu" , __UpperCAmelCase : Optional[int]=1E-5 , __UpperCAmelCase : List[str]=0.0 , __UpperCAmelCase : Union[str, Any]=0.02 , **__UpperCAmelCase : int , ) -> Tuple:
super().__init__(**__UpperCAmelCase )
UpperCAmelCase_= hidden_size
UpperCAmelCase_= intermediate_size
UpperCAmelCase_= num_hidden_layers
UpperCAmelCase_= num_attention_heads
UpperCAmelCase_= num_channels
UpperCAmelCase_= patch_size
UpperCAmelCase_= image_size
UpperCAmelCase_= initializer_range
UpperCAmelCase_= attention_dropout
UpperCAmelCase_= layer_norm_eps
UpperCAmelCase_= hidden_act
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : str , __UpperCAmelCase : Union[str, os.PathLike] , **__UpperCAmelCase : Union[str, Any] ) -> "PretrainedConfig":
cls._set_token_in_kwargs(__UpperCAmelCase )
UpperCAmelCase_, UpperCAmelCase_= cls.get_config_dict(__UpperCAmelCase , **__UpperCAmelCase )
# get the vision config dict if we are loading from GITConfig
if config_dict.get("""model_type""" ) == "git":
UpperCAmelCase_= config_dict["""vision_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(__UpperCAmelCase , **__UpperCAmelCase )
class lowercase ( snake_case__):
"""simple docstring"""
a__ : Optional[int] = "git"
def __init__( self : List[str] , __UpperCAmelCase : Union[str, Any]=None , __UpperCAmelCase : Tuple=30_522 , __UpperCAmelCase : List[Any]=768 , __UpperCAmelCase : Any=6 , __UpperCAmelCase : List[str]=12 , __UpperCAmelCase : Optional[int]=3_072 , __UpperCAmelCase : Dict="gelu" , __UpperCAmelCase : Any=0.1 , __UpperCAmelCase : Optional[Any]=0.1 , __UpperCAmelCase : Any=1_024 , __UpperCAmelCase : Optional[Any]=0.02 , __UpperCAmelCase : str=1E-12 , __UpperCAmelCase : Optional[Any]=0 , __UpperCAmelCase : Optional[Any]="absolute" , __UpperCAmelCase : List[Any]=True , __UpperCAmelCase : Any=False , __UpperCAmelCase : str=101 , __UpperCAmelCase : int=102 , __UpperCAmelCase : Any=None , **__UpperCAmelCase : Optional[int] , ) -> Union[str, Any]:
super().__init__(bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase , pad_token_id=__UpperCAmelCase , **__UpperCAmelCase )
if vision_config is None:
UpperCAmelCase_= {}
logger.info("""vision_config is None. initializing the GitVisionConfig with default values.""" )
UpperCAmelCase_= GitVisionConfig(**__UpperCAmelCase )
UpperCAmelCase_= vocab_size
UpperCAmelCase_= hidden_size
UpperCAmelCase_= num_hidden_layers
UpperCAmelCase_= num_attention_heads
UpperCAmelCase_= hidden_act
UpperCAmelCase_= intermediate_size
UpperCAmelCase_= hidden_dropout_prob
UpperCAmelCase_= attention_probs_dropout_prob
UpperCAmelCase_= max_position_embeddings
UpperCAmelCase_= initializer_range
UpperCAmelCase_= layer_norm_eps
UpperCAmelCase_= position_embedding_type
UpperCAmelCase_= use_cache
UpperCAmelCase_= tie_word_embeddings
UpperCAmelCase_= num_image_with_embedding
UpperCAmelCase_= bos_token_id
UpperCAmelCase_= eos_token_id
def _SCREAMING_SNAKE_CASE ( self : Any ) -> Optional[Any]:
UpperCAmelCase_= copy.deepcopy(self.__dict__ )
UpperCAmelCase_= self.vision_config.to_dict()
UpperCAmelCase_= self.__class__.model_type
return output
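# Hedged, illustrative sketch (hypothetical constructor name, since this
# module's class names were mangled): the `to_dict` override above serializes
# the nested vision config alongside the text config.
# >>> cfg = GitConfig()                      # hypothetical default construction
# >>> d = cfg.to_dict()
# >>> "vision_config" in d and "model_type" in d
# True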
| 593
|
import inspect
import unittest
from transformers import MobileViTVaConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel
from transformers.models.mobilevitva.modeling_mobilevitva import (
MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
make_divisible,
)
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class lowercase ( snake_case__):
"""simple docstring"""
def _SCREAMING_SNAKE_CASE ( self : int ) -> Tuple:
UpperCAmelCase_= self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(__UpperCAmelCase , """width_multiplier""" ) )
class lowercase :
"""simple docstring"""
def __init__( self : Tuple , __UpperCAmelCase : Any , __UpperCAmelCase : List[str]=13 , __UpperCAmelCase : Optional[int]=64 , __UpperCAmelCase : Optional[Any]=2 , __UpperCAmelCase : Any=3 , __UpperCAmelCase : List[str]="swish" , __UpperCAmelCase : Optional[int]=3 , __UpperCAmelCase : int=32 , __UpperCAmelCase : Union[str, Any]=0.1 , __UpperCAmelCase : List[str]=0.02 , __UpperCAmelCase : Tuple=True , __UpperCAmelCase : Union[str, Any]=True , __UpperCAmelCase : Dict=10 , __UpperCAmelCase : Dict=None , __UpperCAmelCase : List[str]=0.25 , __UpperCAmelCase : Optional[int]=0.0 , __UpperCAmelCase : Any=0.0 , ) -> List[str]:
UpperCAmelCase_= parent
UpperCAmelCase_= batch_size
UpperCAmelCase_= image_size
UpperCAmelCase_= patch_size
UpperCAmelCase_= num_channels
UpperCAmelCase_= make_divisible(512 * width_multiplier , divisor=8 )
UpperCAmelCase_= hidden_act
UpperCAmelCase_= conv_kernel_size
UpperCAmelCase_= output_stride
UpperCAmelCase_= classifier_dropout_prob
UpperCAmelCase_= use_labels
UpperCAmelCase_= is_training
UpperCAmelCase_= num_labels
UpperCAmelCase_= initializer_range
UpperCAmelCase_= scope
UpperCAmelCase_= width_multiplier
UpperCAmelCase_= ffn_dropout
UpperCAmelCase_= attn_dropout
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[int]:
UpperCAmelCase_= floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase_= None
UpperCAmelCase_= None
if self.use_labels:
UpperCAmelCase_= ids_tensor([self.batch_size] , self.num_labels )
UpperCAmelCase_= ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
UpperCAmelCase_= self.get_config()
return config, pixel_values, labels, pixel_labels
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> str:
return MobileViTVaConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , width_multiplier=self.width_multiplier , ffn_dropout=self.ffn_dropout_prob , attn_dropout=self.attn_dropout_prob , )
def _SCREAMING_SNAKE_CASE ( self : List[Any] , __UpperCAmelCase : List[str] , __UpperCAmelCase : Any , __UpperCAmelCase : List[str] , __UpperCAmelCase : Optional[int] ) -> Optional[int]:
UpperCAmelCase_= MobileViTVaModel(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
UpperCAmelCase_= model(__UpperCAmelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def _SCREAMING_SNAKE_CASE ( self : Any , __UpperCAmelCase : str , __UpperCAmelCase : Any , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Optional[Any] ) -> List[Any]:
UpperCAmelCase_= self.num_labels
UpperCAmelCase_= MobileViTVaForImageClassification(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
UpperCAmelCase_= model(__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _SCREAMING_SNAKE_CASE ( self : Dict , __UpperCAmelCase : int , __UpperCAmelCase : Dict , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Any ) -> str:
UpperCAmelCase_= self.num_labels
UpperCAmelCase_= MobileViTVaForSemanticSegmentation(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
UpperCAmelCase_= model(__UpperCAmelCase )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
UpperCAmelCase_= model(__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def _SCREAMING_SNAKE_CASE ( self : int ) -> Any:
UpperCAmelCase_= self.prepare_config_and_inputs()
UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_= config_and_inputs
UpperCAmelCase_= {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class lowercase ( snake_case__ , snake_case__ , unittest.TestCase):
"""simple docstring"""
a__ : List[str] = (
(MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation)
if is_torch_available()
else ()
)
a__ : str = (
{
"feature-extraction": MobileViTVaModel,
"image-classification": MobileViTVaForImageClassification,
"image-segmentation": MobileViTVaForSemanticSegmentation,
}
if is_torch_available()
else {}
)
a__ : Any = False
a__ : int = False
a__ : Optional[int] = False
a__ : Union[str, Any] = False
def _SCREAMING_SNAKE_CASE ( self : str ) -> Union[str, Any]:
UpperCAmelCase_= MobileViTVaModelTester(self )
UpperCAmelCase_= MobileViTVaConfigTester(self , config_class=__UpperCAmelCase , has_text_modality=__UpperCAmelCase )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Union[str, Any]:
self.config_tester.run_common_tests()
@unittest.skip(reason="""MobileViTV2 does not use inputs_embeds""" )
def _SCREAMING_SNAKE_CASE ( self : str ) -> Optional[int]:
pass
@unittest.skip(reason="""MobileViTV2 does not support input and output embeddings""" )
def _SCREAMING_SNAKE_CASE ( self : str ) -> Optional[int]:
pass
@unittest.skip(reason="""MobileViTV2 does not output attentions""" )
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Dict:
pass
@require_torch_multi_gpu
@unittest.skip(reason="""Got `CUDA error: misaligned address` for tests after this one being run.""" )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> int:
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def _SCREAMING_SNAKE_CASE ( self : int ) -> List[str]:
pass
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> str:
UpperCAmelCase_, UpperCAmelCase_= self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_= model_class(__UpperCAmelCase )
UpperCAmelCase_= inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase_= [*signature.parameters.keys()]
UpperCAmelCase_= ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __UpperCAmelCase )
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[Any]:
UpperCAmelCase_= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCAmelCase )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Any:
def check_hidden_states_output(__UpperCAmelCase : List[str] , __UpperCAmelCase : List[Any] , __UpperCAmelCase : List[Any] ):
UpperCAmelCase_= model_class(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
with torch.no_grad():
UpperCAmelCase_= model(**self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase ) )
UpperCAmelCase_= outputs.hidden_states
UpperCAmelCase_= 5
self.assertEqual(len(__UpperCAmelCase ) , __UpperCAmelCase )
# MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
# with the width and height being successively divided by 2.
UpperCAmelCase_= 2
for i in range(len(__UpperCAmelCase ) ):
self.assertListEqual(
list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , )
divisor *= 2
self.assertEqual(self.model_tester.output_stride , divisor // 2 )
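        # Hedged, illustrative arithmetic for the loop above: with image_size=64
        # and 5 hidden states, the expected spatial sizes halve at each stage.
        # >>> [64 // (2 ** i) for i in range(1, 6)]
        # [32, 16, 8, 4, 2]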
UpperCAmelCase_, UpperCAmelCase_= self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_= True
check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase_= True
check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple:
UpperCAmelCase_= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__UpperCAmelCase )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Any:
UpperCAmelCase_= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*__UpperCAmelCase )
@slow
def _SCREAMING_SNAKE_CASE ( self : int ) -> List[str]:
for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_= MobileViTVaModel.from_pretrained(__UpperCAmelCase )
self.assertIsNotNone(__UpperCAmelCase )
def __a ( ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_= Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class lowercase ( unittest.TestCase):
"""simple docstring"""
@cached_property
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Any:
return (
MobileViTImageProcessor.from_pretrained("""apple/mobilevitv2-1.0-imagenet1k-256""" )
if is_vision_available()
else None
)
@slow
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[Any]:
UpperCAmelCase_= MobileViTVaForImageClassification.from_pretrained("""apple/mobilevitv2-1.0-imagenet1k-256""" ).to(
__UpperCAmelCase )
UpperCAmelCase_= self.default_image_processor
UpperCAmelCase_= prepare_img()
UpperCAmelCase_= image_processor(images=__UpperCAmelCase , return_tensors="""pt""" ).to(__UpperCAmelCase )
# forward pass
with torch.no_grad():
UpperCAmelCase_= model(**__UpperCAmelCase )
# verify the logits
UpperCAmelCase_= torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , __UpperCAmelCase )
UpperCAmelCase_= torch.tensor([-1.6_336E00, -7.3_204E-02, -5.1_883E-01] ).to(__UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __UpperCAmelCase , atol=1E-4 ) )
@slow
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> int:
UpperCAmelCase_= MobileViTVaForSemanticSegmentation.from_pretrained("""shehan97/mobilevitv2-1.0-voc-deeplabv3""" )
UpperCAmelCase_= model.to(__UpperCAmelCase )
UpperCAmelCase_= MobileViTImageProcessor.from_pretrained("""shehan97/mobilevitv2-1.0-voc-deeplabv3""" )
UpperCAmelCase_= prepare_img()
UpperCAmelCase_= image_processor(images=__UpperCAmelCase , return_tensors="""pt""" ).to(__UpperCAmelCase )
# forward pass
with torch.no_grad():
UpperCAmelCase_= model(**__UpperCAmelCase )
UpperCAmelCase_= outputs.logits
# verify the logits
UpperCAmelCase_= torch.Size((1, 21, 32, 32) )
self.assertEqual(logits.shape , __UpperCAmelCase )
UpperCAmelCase_= torch.tensor(
[
[[7.0_863, 7.1_525, 6.8_201], [6.6_931, 6.8_770, 6.8_933], [6.2_978, 7.0_366, 6.9_636]],
[[-3.7_134, -3.6_712, -3.6_675], [-3.5_825, -3.3_549, -3.4_777], [-3.3_435, -3.3_979, -3.2_857]],
[[-2.9_329, -2.8_003, -2.7_369], [-3.0_564, -2.4_780, -2.0_207], [-2.6_889, -1.9_298, -1.7_640]],
] , device=__UpperCAmelCase , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , __UpperCAmelCase , atol=1E-4 ) )
@slow
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Any:
UpperCAmelCase_= MobileViTVaForSemanticSegmentation.from_pretrained("""shehan97/mobilevitv2-1.0-voc-deeplabv3""" )
UpperCAmelCase_= model.to(__UpperCAmelCase )
UpperCAmelCase_= MobileViTImageProcessor.from_pretrained("""shehan97/mobilevitv2-1.0-voc-deeplabv3""" )
UpperCAmelCase_= prepare_img()
UpperCAmelCase_= image_processor(images=__UpperCAmelCase , return_tensors="""pt""" ).to(__UpperCAmelCase )
# forward pass
with torch.no_grad():
UpperCAmelCase_= model(**__UpperCAmelCase )
UpperCAmelCase_= outputs.logits.detach().cpu()
UpperCAmelCase_= image_processor.post_process_semantic_segmentation(outputs=__UpperCAmelCase , target_sizes=[(50, 60)] )
UpperCAmelCase_= torch.Size((50, 60) )
self.assertEqual(segmentation[0].shape , __UpperCAmelCase )
UpperCAmelCase_= image_processor.post_process_semantic_segmentation(outputs=__UpperCAmelCase )
UpperCAmelCase_= torch.Size((32, 32) )
self.assertEqual(segmentation[0].shape , __UpperCAmelCase )
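        # Hedged, illustrative sketch of the resize that post-processing with
        # `target_sizes` performs (torch API, hypothetical values):
        # >>> import torch
        # >>> logits = torch.randn(1, 21, 32, 32)
        # >>> torch.nn.functional.interpolate(logits, size=(50, 60), mode="bilinear", align_corners=False).shape
        # torch.Size([1, 21, 50, 60])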
| 593
| 1
|
"""simple docstring"""
from __future__ import annotations
def A_ ( __lowercase , __lowercase ):
 # 1) Construct the failure array for the pattern
 UpperCamelCase_ : Dict =get_failure_array(__lowercase )
 # 2) Step through text searching for pattern
UpperCamelCase_ , UpperCamelCase_ : Optional[Any] =0, 0 # index into text, pattern
while i < len(__lowercase ):
if pattern[j] == text[i]:
if j == (len(__lowercase ) - 1):
return True
j += 1
# if this is a prefix in our pattern
# just go back far enough to continue
elif j > 0:
UpperCamelCase_ : Optional[Any] =failure[j - 1]
continue
i += 1
return False
def A_ ( __lowercase ):
UpperCamelCase_ : List[str] =[0]
UpperCamelCase_ : List[str] =0
UpperCamelCase_ : List[Any] =1
while j < len(__lowercase ):
if pattern[i] == pattern[j]:
i += 1
elif i > 0:
UpperCamelCase_ : Dict =failure[i - 1]
continue
j += 1
failure.append(__lowercase )
return failure
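# Hedged, illustrative note: the failure array stores, for each prefix of the
# pattern, the length of its longest proper border (a prefix that is also a
# suffix), which is what lets the search resume without re-scanning the text.
# For "aabaabaaa" (Test 5 below) the intended result is:
# >>> get_failure_array("aabaabaaa")
# [0, 1, 0, 1, 2, 3, 4, 5, 2]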
if __name__ == "__main__":
# Test 1)
__SCREAMING_SNAKE_CASE = 'abc1abc12'
 __SCREAMING_SNAKE_CASE = 'alskfjaldsabc1abc1abc12k23adsfabcabc'  # contains the pattern
 __SCREAMING_SNAKE_CASE = 'alskfjaldsk23adsfabcabc'  # does not contain the pattern
 assert kmp(pattern, texta) and not kmp(pattern, textb)
# Test 2)
__SCREAMING_SNAKE_CASE = 'ABABX'
__SCREAMING_SNAKE_CASE = 'ABABZABABYABABX'
assert kmp(pattern, text)
# Test 3)
__SCREAMING_SNAKE_CASE = 'AAAB'
__SCREAMING_SNAKE_CASE = 'ABAAAAAB'
assert kmp(pattern, text)
# Test 4)
__SCREAMING_SNAKE_CASE = 'abcdabcy'
__SCREAMING_SNAKE_CASE = 'abcxabcdabxabcdabcdabcy'
assert kmp(pattern, text)
# Test 5)
__SCREAMING_SNAKE_CASE = 'aabaabaaa'
assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
| 395
|
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
__SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
def A_ ( __lowercase ):
UpperCamelCase_ : int =YolosConfig()
# size of the architecture
if "yolos_ti" in yolos_name:
UpperCamelCase_ : Union[str, Any] =1_92
UpperCamelCase_ : Optional[Any] =7_68
UpperCamelCase_ : Optional[int] =12
UpperCamelCase_ : Tuple =3
UpperCamelCase_ : List[str] =[8_00, 13_33]
UpperCamelCase_ : List[str] =False
elif yolos_name == "yolos_s_dWr":
UpperCamelCase_ : str =3_30
UpperCamelCase_ : Dict =14
UpperCamelCase_ : List[str] =6
UpperCamelCase_ : List[str] =13_20
elif "yolos_s" in yolos_name:
UpperCamelCase_ : Optional[int] =3_84
UpperCamelCase_ : str =15_36
UpperCamelCase_ : List[Any] =12
UpperCamelCase_ : Union[str, Any] =6
elif "yolos_b" in yolos_name:
UpperCamelCase_ : Optional[Any] =[8_00, 13_44]
UpperCamelCase_ : Union[str, Any] =91
UpperCamelCase_ : Optional[Any] ='huggingface/label-files'
UpperCamelCase_ : Union[str, Any] ='coco-detection-id2label.json'
UpperCamelCase_ : Optional[Any] =json.load(open(hf_hub_download(__lowercase , __lowercase , repo_type='dataset' ) , 'r' ) )
UpperCamelCase_ : str ={int(__lowercase ): v for k, v in idalabel.items()}
UpperCamelCase_ : str =idalabel
UpperCamelCase_ : Optional[int] ={v: k for k, v in idalabel.items()}
return config
def A_ ( __lowercase , __lowercase , __lowercase = False ):
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
UpperCamelCase_ : Optional[Any] =state_dict.pop(F'''blocks.{i}.attn.qkv.weight''' )
UpperCamelCase_ : str =state_dict.pop(F'''blocks.{i}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
UpperCamelCase_ : int =in_proj_weight[: config.hidden_size, :]
UpperCamelCase_ : Optional[Any] =in_proj_bias[: config.hidden_size]
UpperCamelCase_ : Optional[Any] =in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
UpperCamelCase_ : Optional[int] =in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
UpperCamelCase_ : Any =in_proj_weight[-config.hidden_size :, :]
UpperCamelCase_ : List[str] =in_proj_bias[-config.hidden_size :]
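# Hedged, illustrative sketch of the slicing above (hypothetical sizes): a
# fused qkv weight of shape (3*H, H) splits into query/key/value blocks of
# shape (H, H) each.
# >>> import torch
# >>> H = 4
# >>> qkv = torch.arange(3 * H * H).reshape(3 * H, H)
# >>> tuple(qkv[:H].shape), tuple(qkv[H : 2 * H].shape), tuple(qkv[-H:].shape)
# ((4, 4), (4, 4), (4, 4))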
def A_ ( __lowercase ):
if "backbone" in name:
UpperCamelCase_ : Any =name.replace('backbone' , 'vit' )
if "cls_token" in name:
UpperCamelCase_ : str =name.replace('cls_token' , 'embeddings.cls_token' )
if "det_token" in name:
UpperCamelCase_ : Optional[Any] =name.replace('det_token' , 'embeddings.detection_tokens' )
if "mid_pos_embed" in name:
UpperCamelCase_ : List[Any] =name.replace('mid_pos_embed' , 'encoder.mid_position_embeddings' )
if "pos_embed" in name:
UpperCamelCase_ : str =name.replace('pos_embed' , 'embeddings.position_embeddings' )
if "patch_embed.proj" in name:
UpperCamelCase_ : Dict =name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
if "blocks" in name:
UpperCamelCase_ : Optional[int] =name.replace('blocks' , 'encoder.layer' )
if "attn.proj" in name:
UpperCamelCase_ : Union[str, Any] =name.replace('attn.proj' , 'attention.output.dense' )
if "attn" in name:
UpperCamelCase_ : Optional[Any] =name.replace('attn' , 'attention.self' )
if "norm1" in name:
UpperCamelCase_ : Optional[Any] =name.replace('norm1' , 'layernorm_before' )
if "norm2" in name:
UpperCamelCase_ : Tuple =name.replace('norm2' , 'layernorm_after' )
if "mlp.fc1" in name:
UpperCamelCase_ : List[str] =name.replace('mlp.fc1' , 'intermediate.dense' )
if "mlp.fc2" in name:
UpperCamelCase_ : int =name.replace('mlp.fc2' , 'output.dense' )
if "class_embed" in name:
UpperCamelCase_ : Any =name.replace('class_embed' , 'class_labels_classifier' )
if "bbox_embed" in name:
UpperCamelCase_ : Union[str, Any] =name.replace('bbox_embed' , 'bbox_predictor' )
if "vit.norm" in name:
UpperCamelCase_ : Union[str, Any] =name.replace('vit.norm' , 'vit.layernorm' )
return name
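# Hedged, illustrative example of one timm -> HF rename produced by the chain
# above:
# >>> "blocks.0.attn.proj.weight".replace("blocks", "encoder.layer").replace("attn.proj", "attention.output.dense")
# 'encoder.layer.0.attention.output.dense.weight'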
def A_ ( __lowercase , __lowercase ):
for key in orig_state_dict.copy().keys():
UpperCamelCase_ : List[str] =orig_state_dict.pop(__lowercase )
if "qkv" in key:
UpperCamelCase_ : Any =key.split('.' )
UpperCamelCase_ : List[str] =int(key_split[2] )
UpperCamelCase_ : Union[str, Any] =model.vit.encoder.layer[layer_num].attention.attention.all_head_size
if "weight" in key:
UpperCamelCase_ : Optional[int] =val[:dim, :]
UpperCamelCase_ : List[str] =val[
dim : dim * 2, :
]
UpperCamelCase_ : Tuple =val[-dim:, :]
else:
UpperCamelCase_ : Optional[Any] =val[:dim]
UpperCamelCase_ : Optional[Any] =val[dim : dim * 2]
UpperCamelCase_ : Union[str, Any] =val[-dim:]
else:
UpperCamelCase_ : Dict =val
return orig_state_dict
def A_ ( ):
UpperCamelCase_ : str ='http://images.cocodataset.org/val2017/000000039769.jpg'
UpperCamelCase_ : Optional[int] =Image.open(requests.get(__lowercase , stream=__lowercase ).raw )
return im
@torch.no_grad()
def A_ ( __lowercase , __lowercase , __lowercase , __lowercase = False ):
UpperCamelCase_ : int =get_yolos_config(__lowercase )
# load original state_dict
UpperCamelCase_ : Union[str, Any] =torch.load(__lowercase , map_location='cpu' )['model']
# load 🤗 model
UpperCamelCase_ : Dict =YolosForObjectDetection(__lowercase )
model.eval()
UpperCamelCase_ : Union[str, Any] =convert_state_dict(__lowercase , __lowercase )
model.load_state_dict(__lowercase )
# Check outputs on an image, prepared by YolosImageProcessor
UpperCamelCase_ : Any =8_00 if yolos_name != 'yolos_ti' else 5_12
UpperCamelCase_ : List[Any] =YolosImageProcessor(format='coco_detection' , size=__lowercase )
UpperCamelCase_ : Union[str, Any] =image_processor(images=prepare_img() , return_tensors='pt' )
UpperCamelCase_ : int =model(**__lowercase )
UpperCamelCase_ , UpperCamelCase_ : Optional[Any] =outputs.logits, outputs.pred_boxes
UpperCamelCase_ , UpperCamelCase_ : Optional[Any] =None, None
if yolos_name == "yolos_ti":
UpperCamelCase_ : List[str] =torch.tensor(
[[-39.5022, -11.9820, -17.6888], [-29.9574, -9.9769, -17.7691], [-42.3281, -20.7200, -30.6294]] )
UpperCamelCase_ : str =torch.tensor(
[[0.4021, 0.0836, 0.7979], [0.0184, 0.2609, 0.0364], [0.1781, 0.2004, 0.2095]] )
elif yolos_name == "yolos_s_200_pre":
UpperCamelCase_ : Tuple =torch.tensor(
[[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]] )
UpperCamelCase_ : Any =torch.tensor(
[[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]] )
elif yolos_name == "yolos_s_300_pre":
UpperCamelCase_ : Optional[Any] =torch.tensor(
[[-36.2220, -14.4385, -23.5457], [-35.6970, -14.7583, -21.3935], [-31.5939, -13.6042, -16.8049]] )
UpperCamelCase_ : Tuple =torch.tensor(
[[0.7614, 0.2316, 0.4728], [0.7168, 0.4495, 0.3855], [0.4996, 0.1466, 0.9996]] )
elif yolos_name == "yolos_s_dWr":
UpperCamelCase_ : Union[str, Any] =torch.tensor(
[[-42.8668, -24.1049, -41.1690], [-34.7456, -14.1274, -24.9194], [-33.7898, -12.1946, -25.6495]] )
UpperCamelCase_ : Union[str, Any] =torch.tensor(
[[0.5587, 0.2773, 0.0605], [0.5004, 0.3014, 0.9994], [0.4999, 0.1548, 0.9994]] )
elif yolos_name == "yolos_base":
UpperCamelCase_ : Tuple =torch.tensor(
[[-40.6064, -24.3084, -32.6447], [-55.1990, -30.7719, -35.5877], [-51.4311, -33.3507, -35.6462]] )
UpperCamelCase_ : Optional[Any] =torch.tensor(
[[0.5555, 0.2794, 0.0655], [0.9049, 0.2664, 0.1894], [0.9183, 0.1984, 0.1635]] )
else:
raise ValueError(F'''Unknown yolos_name: {yolos_name}''' )
assert torch.allclose(logits[0, :3, :3] , __lowercase , atol=1e-4 )
assert torch.allclose(pred_boxes[0, :3, :3] , __lowercase , atol=1e-4 )
Path(__lowercase ).mkdir(exist_ok=__lowercase )
print(F'''Saving model {yolos_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(__lowercase )
print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(__lowercase )
if push_to_hub:
UpperCamelCase_ : Tuple ={
'yolos_ti': 'yolos-tiny',
'yolos_s_200_pre': 'yolos-small',
'yolos_s_300_pre': 'yolos-small-300',
'yolos_s_dWr': 'yolos-small-dwr',
'yolos_base': 'yolos-base',
}
print('Pushing to the hub...' )
UpperCamelCase_ : Optional[int] =model_mapping[yolos_name]
image_processor.push_to_hub(__lowercase , organization='hustvl' )
model.push_to_hub(__lowercase , organization='hustvl' )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--yolos_name',
default='yolos_s_200_pre',
type=str,
help=(
'Name of the YOLOS model you\'d like to convert. Should be one of \'yolos_ti\', \'yolos_s_200_pre\','
' \'yolos_s_300_pre\', \'yolos_s_dWr\', \'yolos_base\'.'
),
)
parser.add_argument(
'--checkpoint_path', default=None, type=str, help='Path to the original state dict (.pth file).'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
__SCREAMING_SNAKE_CASE = parser.parse_args()
convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
| 395
| 1
|
'''simple docstring'''
import json
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from transformers import OneFormerImageProcessor
from transformers.models.oneformer.image_processing_oneformer import binary_mask_to_rle
from transformers.models.oneformer.modeling_oneformer import OneFormerForUniversalSegmentationOutput
if is_vision_available():
from PIL import Image
def __UpperCAmelCase ( a_: Tuple, a_: Union[str, Any]="shi-labs/oneformer_demo" ):
with open(hf_hub_download(a_, a_, repo_type="dataset" ), "r" ) as f:
_UpperCAmelCase : List[Any] = json.load(a_ )
_UpperCAmelCase : Any = {}
_UpperCAmelCase : Tuple = []
_UpperCAmelCase : Any = []
for key, info in class_info.items():
_UpperCAmelCase : Optional[int] = info["name"]
class_names.append(info["name"] )
if info["isthing"]:
thing_ids.append(int(a_ ) )
_UpperCAmelCase : Dict = thing_ids
_UpperCAmelCase : Optional[int] = class_names
return metadata
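# Hedged, illustrative sketch (hypothetical class_info entries): the helper
# above maps ids to names and collects the ids flagged as "things".
# >>> info = {"0": {"name": "wall", "isthing": 0}, "1": {"name": "person", "isthing": 1}}
# >>> [int(k) for k, v in info.items() if v["isthing"]]
# [1]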
class A__ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : int , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : str=7 , lowerCAmelCase__ : Union[str, Any]=3 , lowerCAmelCase__ : Any=3_0 , lowerCAmelCase__ : str=4_0_0 , lowerCAmelCase__ : str=None , lowerCAmelCase__ : Optional[Any]=True , lowerCAmelCase__ : str=True , lowerCAmelCase__ : Optional[Any]=[0.5, 0.5, 0.5] , lowerCAmelCase__ : Optional[int]=[0.5, 0.5, 0.5] , lowerCAmelCase__ : Tuple=1_0 , lowerCAmelCase__ : List[Any]=False , lowerCAmelCase__ : Union[str, Any]=2_5_5 , lowerCAmelCase__ : Tuple="shi-labs/oneformer_demo" , lowerCAmelCase__ : Optional[int]="ade20k_panoptic.json" , lowerCAmelCase__ : List[str]=1_0 , ) -> Dict:
"""simple docstring"""
_UpperCAmelCase : Optional[Any] = parent
_UpperCAmelCase : Any = batch_size
_UpperCAmelCase : List[str] = num_channels
_UpperCAmelCase : Tuple = min_resolution
_UpperCAmelCase : Any = max_resolution
_UpperCAmelCase : List[Any] = do_resize
_UpperCAmelCase : str = {"shortest_edge": 3_2, "longest_edge": 1_3_3_3} if size is None else size
_UpperCAmelCase : str = do_normalize
_UpperCAmelCase : List[str] = image_mean
_UpperCAmelCase : Any = image_std
_UpperCAmelCase : Any = class_info_file
_UpperCAmelCase : int = prepare_metadata(lowerCAmelCase__ , lowerCAmelCase__ )
_UpperCAmelCase : str = num_text
_UpperCAmelCase : List[str] = repo_path
# for the post_process_functions
_UpperCAmelCase : Union[str, Any] = 2
_UpperCAmelCase : Optional[Any] = 1_0
_UpperCAmelCase : List[str] = 1_0
_UpperCAmelCase : Tuple = 3
_UpperCAmelCase : Dict = 4
_UpperCAmelCase : Optional[int] = num_labels
_UpperCAmelCase : int = do_reduce_labels
_UpperCAmelCase : List[str] = ignore_index
def _lowerCAmelCase ( self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"num_labels": self.num_labels,
"do_reduce_labels": self.do_reduce_labels,
"ignore_index": self.ignore_index,
"class_info_file": self.class_info_file,
"metadata": self.metadata,
"num_text": self.num_text,
}
def _lowerCAmelCase ( self : List[str] , lowerCAmelCase__ : Any , lowerCAmelCase__ : Dict=False ) -> int:
"""simple docstring"""
if not batched:
_UpperCAmelCase : Dict = image_inputs[0]
if isinstance(lowerCAmelCase__ , Image.Image ):
_UpperCAmelCase , _UpperCAmelCase : Any = image.size
else:
_UpperCAmelCase , _UpperCAmelCase : List[Any] = image.shape[1], image.shape[2]
if w < h:
_UpperCAmelCase : Optional[int] = int(self.size["shortest_edge"] * h / w )
_UpperCAmelCase : Optional[int] = self.size["shortest_edge"]
elif w > h:
_UpperCAmelCase : List[Any] = self.size["shortest_edge"]
_UpperCAmelCase : Any = int(self.size["shortest_edge"] * w / h )
else:
_UpperCAmelCase : List[Any] = self.size["shortest_edge"]
_UpperCAmelCase : Any = self.size["shortest_edge"]
else:
_UpperCAmelCase : List[Any] = []
for image in image_inputs:
_UpperCAmelCase , _UpperCAmelCase : Dict = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
_UpperCAmelCase : str = max(lowerCAmelCase__ , key=lambda lowerCAmelCase__ : item[0] )[0]
_UpperCAmelCase : Optional[int] = max(lowerCAmelCase__ , key=lambda lowerCAmelCase__ : item[1] )[1]
return expected_height, expected_width
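    # Hedged, illustrative arithmetic for the shortest-edge logic above: for
    # (h, w) = (30, 60) and shortest_edge = 32, the aspect ratio is preserved.
    # >>> h, w, s = 30, 60, 32
    # >>> (s, int(s * w / h)) if w > h else (int(s * h / w), s)
    # (32, 64)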
def _lowerCAmelCase ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
return OneFormerForUniversalSegmentationOutput(
# +1 for null class
class_queries_logits=torch.randn((self.batch_size, self.num_queries, self.num_classes + 1) ) , masks_queries_logits=torch.randn((self.batch_size, self.num_queries, self.height, self.width) ) , )
@require_torch
@require_vision
class A__ ( UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase_ : str = OneFormerImageProcessor if (is_vision_available() and is_torch_available()) else None
# only for test_image_processing_common.test_image_proc_to_json_string
UpperCamelCase_ : Any = image_processing_class
def _lowerCAmelCase ( self : Tuple ) -> str:
"""simple docstring"""
_UpperCAmelCase : str = OneFormerImageProcessorTester(self )
@property
def _lowerCAmelCase ( self : Dict ) -> Optional[Any]:
"""simple docstring"""
return self.image_processing_tester.prepare_image_processor_dict()
def _lowerCAmelCase ( self : int ) -> Dict:
"""simple docstring"""
_UpperCAmelCase : Optional[int] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCAmelCase__ , "image_mean" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , "image_std" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , "do_normalize" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , "do_resize" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , "size" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , "ignore_index" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , "class_info_file" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , "num_text" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , "repo_path" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , "metadata" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , "do_reduce_labels" ) )
def _lowerCAmelCase ( self : Union[str, Any] ) -> str:
"""simple docstring"""
pass
def _lowerCAmelCase ( self : List[str] ) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase : str = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_UpperCAmelCase : List[str] = prepare_image_inputs(self.image_processing_tester , equal_resolution=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , Image.Image )
# Test not batched input
_UpperCAmelCase : Union[str, Any] = image_processor(image_inputs[0] , ["semantic"] , return_tensors="pt" ).pixel_values
_UpperCAmelCase , _UpperCAmelCase : Tuple = self.image_processing_tester.get_expected_values(lowerCAmelCase__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
_UpperCAmelCase , _UpperCAmelCase : int = self.image_processing_tester.get_expected_values(lowerCAmelCase__ , batched=lowerCAmelCase__ )
_UpperCAmelCase : Any = image_processor(
lowerCAmelCase__ , ["semantic"] * len(lowerCAmelCase__ ) , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def _lowerCAmelCase ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase : Dict = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_UpperCAmelCase : Union[str, Any] = prepare_image_inputs(self.image_processing_tester , equal_resolution=lowerCAmelCase__ , numpify=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , np.ndarray )
# Test not batched input
_UpperCAmelCase : List[str] = image_processor(image_inputs[0] , ["semantic"] , return_tensors="pt" ).pixel_values
_UpperCAmelCase , _UpperCAmelCase : Any = self.image_processing_tester.get_expected_values(lowerCAmelCase__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
_UpperCAmelCase , _UpperCAmelCase : Optional[Any] = self.image_processing_tester.get_expected_values(lowerCAmelCase__ , batched=lowerCAmelCase__ )
_UpperCAmelCase : Optional[Any] = image_processor(
lowerCAmelCase__ , ["semantic"] * len(lowerCAmelCase__ ) , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def _lowerCAmelCase ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
_UpperCAmelCase : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_UpperCAmelCase : List[str] = prepare_image_inputs(self.image_processing_tester , equal_resolution=lowerCAmelCase__ , torchify=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , torch.Tensor )
# Test not batched input
_UpperCAmelCase : Tuple = image_processor(image_inputs[0] , ["semantic"] , return_tensors="pt" ).pixel_values
_UpperCAmelCase , _UpperCAmelCase : Optional[Any] = self.image_processing_tester.get_expected_values(lowerCAmelCase__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
_UpperCAmelCase , _UpperCAmelCase : int = self.image_processing_tester.get_expected_values(lowerCAmelCase__ , batched=lowerCAmelCase__ )
_UpperCAmelCase : Optional[int] = image_processor(
lowerCAmelCase__ , ["semantic"] * len(lowerCAmelCase__ ) , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def _lowerCAmelCase ( self : Any , lowerCAmelCase__ : Optional[int]=False , lowerCAmelCase__ : Optional[Any]=False , lowerCAmelCase__ : Optional[int]="np" ) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase : List[Any] = self.image_processing_class(**self.image_processor_dict )
# prepare image and target
_UpperCAmelCase : Dict = self.image_processing_tester.num_labels
_UpperCAmelCase : Union[str, Any] = None
_UpperCAmelCase : Any = None
_UpperCAmelCase : Optional[int] = prepare_image_inputs(self.image_processing_tester , equal_resolution=lowerCAmelCase__ )
if with_segmentation_maps:
_UpperCAmelCase : str = num_labels
if is_instance_map:
_UpperCAmelCase : Optional[Any] = list(range(lowerCAmelCase__ ) ) * 2
_UpperCAmelCase : Optional[Any] = dict(enumerate(lowerCAmelCase__ ) )
_UpperCAmelCase : Any = [
np.random.randint(0 , high * 2 , (img.size[1], img.size[0]) ).astype(np.uinta ) for img in image_inputs
]
if segmentation_type == "pil":
_UpperCAmelCase : Optional[int] = [Image.fromarray(lowerCAmelCase__ ) for annotation in annotations]
_UpperCAmelCase : str = image_processor(
lowerCAmelCase__ , ["semantic"] * len(lowerCAmelCase__ ) , lowerCAmelCase__ , return_tensors="pt" , instance_id_to_semantic_id=lowerCAmelCase__ , pad_and_return_pixel_mask=lowerCAmelCase__ , )
return inputs
def _lowerCAmelCase ( self : Optional[Any] ) -> List[str]:
"""simple docstring"""
pass
def _lowerCAmelCase ( self : List[Any] ) -> Tuple:
"""simple docstring"""
def common(lowerCAmelCase__ : str=False , lowerCAmelCase__ : Optional[Any]=None ):
_UpperCAmelCase : List[Any] = self.comm_get_image_processor_inputs(
with_segmentation_maps=lowerCAmelCase__ , is_instance_map=lowerCAmelCase__ , segmentation_type=lowerCAmelCase__ )
_UpperCAmelCase : Union[str, Any] = inputs["mask_labels"]
_UpperCAmelCase : int = inputs["class_labels"]
_UpperCAmelCase : Dict = inputs["pixel_values"]
_UpperCAmelCase : List[Any] = inputs["text_inputs"]
# check the batch_size
for mask_label, class_label, text_input in zip(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
self.assertEqual(mask_label.shape[0] , class_label.shape[0] )
# this ensure padding has happened
self.assertEqual(mask_label.shape[1:] , pixel_values.shape[2:] )
self.assertEqual(len(lowerCAmelCase__ ) , self.image_processing_tester.num_text )
common()
common(is_instance_map=lowerCAmelCase__ )
common(is_instance_map=lowerCAmelCase__ , segmentation_type="pil" )
common(is_instance_map=lowerCAmelCase__ , segmentation_type="pil" )
def _lowerCAmelCase ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
_UpperCAmelCase : Union[str, Any] = np.zeros((2_0, 5_0) )
_UpperCAmelCase : List[Any] = 1
_UpperCAmelCase : Optional[int] = 1
_UpperCAmelCase : int = 1
_UpperCAmelCase : str = binary_mask_to_rle(lowerCAmelCase__ )
self.assertEqual(len(lowerCAmelCase__ ) , 4 )
self.assertEqual(rle[0] , 2_1 )
self.assertEqual(rle[1] , 4_5 )
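        # Hedged, conceptual illustration of run-length encoding (not
        # necessarily this library's exact layout): alternating run lengths
        # over the flattened binary mask.
        # >>> import itertools
        # >>> [len(list(g)) for _, g in itertools.groupby([0, 0, 1, 1, 1, 0])]
        # [2, 3, 1]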
def _lowerCAmelCase ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase : List[str] = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=7_7 , task_seq_length=7_7 , class_info_file="ade20k_panoptic.json" , num_text=self.image_processing_tester.num_text , repo_path="shi-labs/oneformer_demo" , )
_UpperCAmelCase : Tuple = self.image_processing_tester.get_fake_oneformer_outputs()
 _UpperCAmelCase : Union[str, Any] = image_processor.post_process_semantic_segmentation(lowerCAmelCase__ )
self.assertEqual(len(lowerCAmelCase__ ) , self.image_processing_tester.batch_size )
self.assertEqual(
segmentation[0].shape , (
self.image_processing_tester.height,
self.image_processing_tester.width,
) , )
_UpperCAmelCase : Tuple = [(1, 4) for i in range(self.image_processing_tester.batch_size )]
 _UpperCAmelCase : Optional[int] = image_processor.post_process_semantic_segmentation(lowerCAmelCase__ , target_sizes=lowerCAmelCase__ )
self.assertEqual(segmentation[0].shape , target_sizes[0] )
def _lowerCAmelCase ( self : str ) -> int:
"""simple docstring"""
_UpperCAmelCase : Tuple = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=7_7 , task_seq_length=7_7 , class_info_file="ade20k_panoptic.json" , num_text=self.image_processing_tester.num_text , repo_path="shi-labs/oneformer_demo" , )
_UpperCAmelCase : List[str] = self.image_processing_tester.get_fake_oneformer_outputs()
_UpperCAmelCase : List[str] = image_processor.post_process_instance_segmentation(lowerCAmelCase__ , threshold=0 )
self.assertTrue(len(lowerCAmelCase__ ) == self.image_processing_tester.batch_size )
for el in segmentation:
self.assertTrue("segmentation" in el )
self.assertTrue("segments_info" in el )
self.assertEqual(type(el["segments_info"] ) , lowerCAmelCase__ )
self.assertEqual(
el["segmentation"].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
    def test_post_process_panoptic_segmentation(self):
        image_processor = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes,
            max_seq_length=77,
            task_seq_length=77,
            class_info_file="ade20k_panoptic.json",
            num_text=self.image_processing_tester.num_text,
            repo_path="shi-labs/oneformer_demo",
        )
        outputs = self.image_processing_tester.get_fake_oneformer_outputs()
        segmentation = image_processor.post_process_panoptic_segmentation(outputs, threshold=0)

        self.assertTrue(len(segmentation) == self.image_processing_tester.batch_size)
        for el in segmentation:
            self.assertTrue("segmentation" in el)
            self.assertTrue("segments_info" in el)
            self.assertEqual(type(el["segments_info"]), list)
            self.assertEqual(
                el["segmentation"].shape, (self.image_processing_tester.height, self.image_processing_tester.width)
            )
| 494
|
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_deformable_detr import DeformableDetrImageProcessor
__a = logging.get_logger(__name__)
class DeformableDetrFeatureExtractor(DeformableDetrImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DeformableDetrFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use DeformableDetrImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 494
| 1
|
"""simple docstring"""
import re
def split_input(str_: str) -> list:
    """Split the string into words on any non-alphanumeric separator."""
    return [char.split() for char in re.split(r"[^ a-z A-Z 0-9 \s]", str_)]


def to_simple_case(str_: str) -> str:
    """Capitalize every word and join them without separators (PascalCase)."""
    string_split = split_input(str_)
    return "".join(["".join([char.capitalize() for char in sub_str]) for sub_str in string_split])


def to_complex_case(text: str, upper: bool, separator: str) -> str:
    """Join the words with ``separator``, upper- or lower-casing them."""
    try:
        string_split = split_input(text)
        if upper:
            res_str = "".join(
                [separator.join([char.upper() for char in sub_str]) for sub_str in string_split]
            )
        else:
            res_str = "".join(
                [separator.join([char.lower() for char in sub_str]) for sub_str in string_split]
            )
        return res_str
    except IndexError:
        return "not valid string"


def to_pascal_case(text: str) -> str:
    return to_simple_case(text)


def to_camel_case(text: str) -> str:
    try:
        res_str = to_simple_case(text)
        return res_str[0].lower() + res_str[1:]
    except IndexError:
        return "not valid string"


def to_snake_case(text: str, upper: bool) -> str:
    return to_complex_case(text, upper, "_")


def to_kebab_case(text: str, upper: bool) -> str:
    return to_complex_case(text, upper, "-")
if __name__ == "__main__":
__import__('''doctest''').testmod()
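

# Minimal usage sketch (assumes the helpers defined above):
def _demo() -> None:
    print(to_pascal_case("hello world"))  # HelloWorld
    print(to_camel_case("hello world"))  # helloWorld
    print(to_snake_case("hello world", upper=False))  # hello_world
    print(to_kebab_case("hello world", upper=True))  # HELLO-WORLD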
| 702
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class ImageSegmentationTool(PipelineTool):
    description = (
        "This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image. "
        "It takes two arguments named `image` which should be the original image, and `label` which should be a text "
        "describing the elements that should be identified in the segmentation mask. The tool returns the mask."
    )
    default_checkpoint = "CIDAS/clipseg-rd64-refined"
    name = "image_segmenter"
    model_class = CLIPSegForImageSegmentation

    inputs = ["image", "text"]
    outputs = ["image"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image, label):
        return self.pre_processor(text=[label], images=[image], padding="max_length", return_tensors="pt")

    def forward(self, inputs):
        with torch.no_grad():
            logits = self.model(**inputs).logits
        return logits

    def decode(self, outputs):
        array = outputs.cpu().detach().numpy()
        array[array <= 0] = 0  # everything at or below the decision boundary is background
        array[array > 0] = 1  # everything above it belongs to the mask
        return Image.fromarray((array * 255).astype(np.uint8))
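

# Minimal usage sketch (illustrative; `PipelineTool.__call__` chains encode,
# forward and decode, and setup lazily downloads the default checkpoint):
#
#   from PIL import Image
#   tool = ImageSegmentationTool()
#   mask = tool(image=Image.open("cat.png"), label="cat")
#   mask.save("cat_mask.png")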
| 494
| 0
|
import warnings
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class FlavaProcessor(ProcessorMixin):
    """Constructs a FLAVA processor which wraps an image processor and a BERT tokenizer into a single processor."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "FlavaImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor
    def __call__(self, images: Optional[ImageInput] = None, text: Optional[Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = False, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_image_mask: Optional[bool] = None, return_codebook_pixels: Optional[bool] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs):
        """Forward the `text` arguments to the tokenizer and the `images` arguments to the image processor."""
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
        if images is not None:
            image_features = self.image_processor(images, return_image_mask=return_image_mask, return_codebook_pixels=return_codebook_pixels, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding.update(image_features)
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)
    def batch_decode(self, *args, **kwargs):
        """Forward all arguments to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward all arguments to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
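

# Minimal usage sketch (assumes a pretrained FLAVA checkpoint such as
# "facebook/flava-full"; the processor batches text through the tokenizer and
# images through the image processor in a single call):
#
#   processor = FlavaProcessor.from_pretrained("facebook/flava-full")
#   inputs = processor(text=["a photo of a cat"], images=[pil_image], return_tensors="pt", padding=True)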
| 393
|
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from huggingface_hub import HfFolder, Repository, create_repo, delete_repo
from requests.exceptions import HTTPError
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
PROCESSOR_MAPPING,
TOKENIZER_MAPPING,
AutoConfig,
AutoFeatureExtractor,
AutoProcessor,
AutoTokenizer,
BertTokenizer,
ProcessorMixin,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
)
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
from transformers.tokenization_utils import TOKENIZER_CONFIG_FILE
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_tokenizers_available
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
from test_module.custom_processing import CustomProcessor # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
A_ = get_tests_dir("fixtures/dummy_feature_extractor_config.json")
A_ = get_tests_dir("fixtures/vocab.json")
A_ = get_tests_dir("fixtures")
class snake_case ( unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase : str = ["""[UNK]""", """[CLS]""", """[SEP]""", """[PAD]""", """[MASK]""", """bla""", """blou"""]
def _lowercase ( self : str ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = 0
def _lowercase ( self : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = AutoProcessor.from_pretrained('''facebook/wav2vec2-base-960h''' )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
def _lowercase ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE_ = WavaVecaConfig()
SCREAMING_SNAKE_CASE_ = AutoProcessor.from_pretrained('''facebook/wav2vec2-base-960h''' )
# save in new folder
model_config.save_pretrained(lowerCAmelCase_ )
processor.save_pretrained(lowerCAmelCase_ )
SCREAMING_SNAKE_CASE_ = AutoProcessor.from_pretrained(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
def _lowercase ( self : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
# copy relevant files
copyfile(lowerCAmelCase_ , os.path.join(lowerCAmelCase_ , lowerCAmelCase_ ) )
copyfile(lowerCAmelCase_ , os.path.join(lowerCAmelCase_ , '''vocab.json''' ) )
SCREAMING_SNAKE_CASE_ = AutoProcessor.from_pretrained(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
def _lowercase ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE_ = WavaVecaFeatureExtractor()
SCREAMING_SNAKE_CASE_ = AutoTokenizer.from_pretrained('''facebook/wav2vec2-base-960h''' )
SCREAMING_SNAKE_CASE_ = WavaVecaProcessor(lowerCAmelCase_ , lowerCAmelCase_ )
# save in new folder
processor.save_pretrained(lowerCAmelCase_ )
# drop `processor_class` in tokenizer
with open(os.path.join(lowerCAmelCase_ , lowerCAmelCase_ ) , '''r''' ) as f:
SCREAMING_SNAKE_CASE_ = json.load(lowerCAmelCase_ )
config_dict.pop('''processor_class''' )
with open(os.path.join(lowerCAmelCase_ , lowerCAmelCase_ ) , '''w''' ) as f:
f.write(json.dumps(lowerCAmelCase_ ) )
SCREAMING_SNAKE_CASE_ = AutoProcessor.from_pretrained(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
def _lowercase ( self : Any ) -> str:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE_ = WavaVecaFeatureExtractor()
SCREAMING_SNAKE_CASE_ = AutoTokenizer.from_pretrained('''facebook/wav2vec2-base-960h''' )
SCREAMING_SNAKE_CASE_ = WavaVecaProcessor(lowerCAmelCase_ , lowerCAmelCase_ )
# save in new folder
processor.save_pretrained(lowerCAmelCase_ )
# drop `processor_class` in feature extractor
with open(os.path.join(lowerCAmelCase_ , lowerCAmelCase_ ) , '''r''' ) as f:
SCREAMING_SNAKE_CASE_ = json.load(lowerCAmelCase_ )
config_dict.pop('''processor_class''' )
with open(os.path.join(lowerCAmelCase_ , lowerCAmelCase_ ) , '''w''' ) as f:
f.write(json.dumps(lowerCAmelCase_ ) )
SCREAMING_SNAKE_CASE_ = AutoProcessor.from_pretrained(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
def _lowercase ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE_ = WavaVecaConfig(processor_class='''Wav2Vec2Processor''' )
model_config.save_pretrained(lowerCAmelCase_ )
# copy relevant files
copyfile(lowerCAmelCase_ , os.path.join(lowerCAmelCase_ , '''vocab.json''' ) )
            # create empty sample processor
with open(os.path.join(lowerCAmelCase_ , lowerCAmelCase_ ) , '''w''' ) as f:
f.write('''{}''' )
SCREAMING_SNAKE_CASE_ = AutoProcessor.from_pretrained(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
def _lowercase ( self : List[Any] ) -> List[str]:
"""simple docstring"""
with self.assertRaises(lowerCAmelCase_ ):
SCREAMING_SNAKE_CASE_ = AutoProcessor.from_pretrained('''hf-internal-testing/test_dynamic_processor''' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(lowerCAmelCase_ ):
SCREAMING_SNAKE_CASE_ = AutoProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=lowerCAmelCase_ )
SCREAMING_SNAKE_CASE_ = AutoProcessor.from_pretrained('''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=lowerCAmelCase_ )
self.assertTrue(processor.special_attribute_present )
self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' )
SCREAMING_SNAKE_CASE_ = processor.feature_extractor
self.assertTrue(feature_extractor.special_attribute_present )
self.assertEqual(feature_extractor.__class__.__name__ , '''NewFeatureExtractor''' )
SCREAMING_SNAKE_CASE_ = processor.tokenizer
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
# Test we can also load the slow version
SCREAMING_SNAKE_CASE_ = AutoProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=lowerCAmelCase_ , use_fast=lowerCAmelCase_ )
SCREAMING_SNAKE_CASE_ = new_processor.tokenizer
self.assertTrue(new_tokenizer.special_attribute_present )
self.assertEqual(new_tokenizer.__class__.__name__ , '''NewTokenizer''' )
else:
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
def _lowercase ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
try:
            AutoConfig.register("custom", CustomConfig)
            AutoFeatureExtractor.register(CustomConfig, CustomFeatureExtractor)
            AutoTokenizer.register(CustomConfig, slow_tokenizer_class=CustomTokenizer)
            AutoProcessor.register(CustomConfig, CustomProcessor)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoProcessor.register(WavaVecaConfig, WavaVecaProcessor)
# Now that the config is registered, it can be used as any other config with the auto-API
SCREAMING_SNAKE_CASE_ = CustomFeatureExtractor.from_pretrained(lowerCAmelCase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
SCREAMING_SNAKE_CASE_ = os.path.join(lowerCAmelCase_ , '''vocab.txt''' )
with open(lowerCAmelCase_ , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) )
SCREAMING_SNAKE_CASE_ = CustomTokenizer(lowerCAmelCase_ )
SCREAMING_SNAKE_CASE_ = CustomProcessor(lowerCAmelCase_ , lowerCAmelCase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(lowerCAmelCase_ )
SCREAMING_SNAKE_CASE_ = AutoProcessor.from_pretrained(lowerCAmelCase_ )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def _lowercase ( self : Dict ) -> int:
"""simple docstring"""
        class NewFeatureExtractor(CustomFeatureExtractor):
            special_attribute_present = False

        class NewTokenizer(CustomTokenizer):
            special_attribute_present = False

        class NewProcessor(CustomProcessor):
            feature_extractor_class = "AutoFeatureExtractor"
            tokenizer_class = "AutoTokenizer"
            special_attribute_present = False

        try:
            AutoConfig.register("custom", CustomConfig)
            AutoFeatureExtractor.register(CustomConfig, NewFeatureExtractor)
            AutoTokenizer.register(CustomConfig, slow_tokenizer_class=NewTokenizer)
            AutoProcessor.register(CustomConfig, NewProcessor)
# If remote code is not set, the default is to use local classes.
SCREAMING_SNAKE_CASE_ = AutoProcessor.from_pretrained('''hf-internal-testing/test_dynamic_processor''' )
self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote code is disabled, we load the local ones.
SCREAMING_SNAKE_CASE_ = AutoProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=lowerCAmelCase_ )
self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub.
SCREAMING_SNAKE_CASE_ = AutoProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=lowerCAmelCase_ )
self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' )
self.assertTrue(processor.special_attribute_present )
self.assertTrue(processor.feature_extractor.special_attribute_present )
self.assertTrue(processor.tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def _lowercase ( self : int ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = AutoProcessor.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
self.assertEqual(processor.__class__.__name__ , '''BertTokenizerFast''' )
def _lowercase ( self : List[Any] ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = AutoProcessor.from_pretrained('''hf-internal-testing/tiny-random-convnext''' )
self.assertEqual(processor.__class__.__name__ , '''ConvNextImageProcessor''' )
@is_staging_test
class snake_case ( unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase : Any = ["""[UNK]""", """[CLS]""", """[SEP]""", """[PAD]""", """[MASK]""", """bla""", """blou"""]
@classmethod
def _lowercase ( cls : Tuple ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = TOKEN
HfFolder.save_token(lowerCAmelCase_ )
@classmethod
def _lowercase ( cls : Dict ) -> List[Any]:
"""simple docstring"""
try:
delete_repo(token=cls._token , repo_id='''test-processor''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-processor-org''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''test-dynamic-processor''' )
except HTTPError:
pass
def _lowercase ( self : Any ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = WavaVecaProcessor.from_pretrained(lowerCAmelCase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(lowerCAmelCase_ , '''test-processor''' ) , push_to_hub=lowerCAmelCase_ , use_auth_token=self._token )
SCREAMING_SNAKE_CASE_ = WavaVecaProcessor.from_pretrained(F'''{USER}/test-processor''' )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(lowerCAmelCase_ , getattr(new_processor.feature_extractor , lowerCAmelCase_ ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def _lowercase ( self : Tuple ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = WavaVecaProcessor.from_pretrained(lowerCAmelCase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(lowerCAmelCase_ , '''test-processor-org''' ) , push_to_hub=lowerCAmelCase_ , use_auth_token=self._token , organization='''valid_org''' , )
SCREAMING_SNAKE_CASE_ = WavaVecaProcessor.from_pretrained('''valid_org/test-processor-org''' )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(lowerCAmelCase_ , getattr(new_processor.feature_extractor , lowerCAmelCase_ ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def _lowercase ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
CustomFeatureExtractor.register_for_auto_class()
CustomTokenizer.register_for_auto_class()
CustomProcessor.register_for_auto_class()
SCREAMING_SNAKE_CASE_ = CustomFeatureExtractor.from_pretrained(lowerCAmelCase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
SCREAMING_SNAKE_CASE_ = os.path.join(lowerCAmelCase_ , '''vocab.txt''' )
with open(lowerCAmelCase_ , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) )
SCREAMING_SNAKE_CASE_ = CustomTokenizer(lowerCAmelCase_ )
SCREAMING_SNAKE_CASE_ = CustomProcessor(lowerCAmelCase_ , lowerCAmelCase_ )
with tempfile.TemporaryDirectory() as tmp_dir:
create_repo(F'''{USER}/test-dynamic-processor''' , token=self._token )
SCREAMING_SNAKE_CASE_ = Repository(lowerCAmelCase_ , clone_from=F'''{USER}/test-dynamic-processor''' , token=self._token )
processor.save_pretrained(lowerCAmelCase_ )
# This has added the proper auto_map field to the feature extractor config
self.assertDictEqual(
processor.feature_extractor.auto_map , {
'''AutoFeatureExtractor''': '''custom_feature_extraction.CustomFeatureExtractor''',
'''AutoProcessor''': '''custom_processing.CustomProcessor''',
} , )
# This has added the proper auto_map field to the tokenizer config
with open(os.path.join(lowerCAmelCase_ , '''tokenizer_config.json''' ) ) as f:
SCREAMING_SNAKE_CASE_ = json.load(lowerCAmelCase_ )
self.assertDictEqual(
tokenizer_config['''auto_map'''] , {
'''AutoTokenizer''': ['''custom_tokenization.CustomTokenizer''', None],
'''AutoProcessor''': '''custom_processing.CustomProcessor''',
} , )
# The code has been copied from fixtures
self.assertTrue(os.path.isfile(os.path.join(lowerCAmelCase_ , '''custom_feature_extraction.py''' ) ) )
self.assertTrue(os.path.isfile(os.path.join(lowerCAmelCase_ , '''custom_tokenization.py''' ) ) )
self.assertTrue(os.path.isfile(os.path.join(lowerCAmelCase_ , '''custom_processing.py''' ) ) )
repo.push_to_hub()
SCREAMING_SNAKE_CASE_ = AutoProcessor.from_pretrained(F'''{USER}/test-dynamic-processor''' , trust_remote_code=lowerCAmelCase_ )
# Can't make an isinstance check because the new_processor is from the CustomProcessor class of a dynamic module
self.assertEqual(new_processor.__class__.__name__ , '''CustomProcessor''' )
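

# The registration pattern exercised above, in minimal form (illustrative,
# assuming a custom config/processor pair; the Auto* classes dispatch on the
# registered config type):
#
#   AutoConfig.register("custom", CustomConfig)
#   AutoFeatureExtractor.register(CustomConfig, CustomFeatureExtractor)
#   AutoTokenizer.register(CustomConfig, slow_tokenizer_class=CustomTokenizer)
#   AutoProcessor.register(CustomConfig, CustomProcessor)
#   processor = AutoProcessor.from_pretrained(tmp_dir)  # resolves to CustomProcessor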
| 393
| 1
|
"""simple docstring"""
import json
import os
import tempfile
import transformers
import datasets
from utils import generate_example_dataset, get_duration
SPEED_TEST_N_EXAMPLES = 500000

RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))


@get_duration
def map(dataset: datasets.Dataset, **kwargs):
    _ = dataset.map(**kwargs)


@get_duration
def filter(dataset: datasets.Dataset, **kwargs):
    _ = dataset.filter(**kwargs)


def benchmark_map_filter():
    times = {"num examples": SPEED_TEST_N_EXAMPLES}
    with tempfile.TemporaryDirectory() as tmp_dir:
        features = datasets.Features({"text": datasets.Value("string"), "numbers": datasets.Value("float32")})
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, "dataset.arrow"), features, num_examples=SPEED_TEST_N_EXAMPLES
        )

        tokenizer = transformers.AutoTokenizer.from_pretrained("bert-base-cased", use_fast=True)

        def tokenize(examples):
            return tokenizer(examples["text"])

        times["map identity"] = map(dataset)
        times["map identity batched"] = map(dataset, batched=True)
        times["map no-op batched"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="numpy"):
            times["map no-op batched numpy"] = map(dataset, function=lambda x: None, batched=True)
        with dataset.formatted_as(type="pandas"):
            times["map no-op batched pandas"] = map(dataset, function=lambda x: None, batched=True)
        with dataset.formatted_as(type="torch", columns="numbers"):
            times["map no-op batched pytorch"] = map(dataset, function=lambda x: None, batched=True)
        with dataset.formatted_as(type="tensorflow", columns="numbers"):
            times["map no-op batched tensorflow"] = map(dataset, function=lambda x: None, batched=True)

        times["map fast-tokenizer batched"] = map(dataset, function=tokenize, batched=True)
        times["filter"] = filter(dataset)

        # Activate later when tokenizer support batched inputs
        # with dataset.formatted_as(type='numpy'):
        #     times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)

    with open(RESULTS_FILE_PATH, "wb") as f:
        f.write(json.dumps(times).encode("utf-8"))
if __name__ == "__main__": # useful to run the profiler
benchmark_map_filter()
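

# `get_duration` is imported from a local `utils` module not shown here; the
# sketch below shows what it is assumed to do (an assumption, not the actual
# helper): time the wrapped call and return the elapsed seconds.
import time
from functools import wraps


def _get_duration_sketch(func):
    @wraps(func)
    def wrapper(*args, **kwargs):
        start = time.time()
        func(*args, **kwargs)
        return time.time() - start  # elapsed seconds, stored in the `times` dict

    return wrapper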
| 112
|
"""simple docstring"""
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors
def mobius(n: int) -> int:
    """Return μ(n), the Möbius function of ``n``."""
    factors = prime_factors(n)
    if is_square_free(factors):
        return -1 if len(factors) % 2 else 1
    return 0
if __name__ == "__main__":
import doctest
doctest.testmod()
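
# Worked examples (values follow from the definition above):
#   mobius(24) == 0   # 24 = 2^3 * 3 is not square-free
#   mobius(10) == 1   # 10 = 2 * 5 has an even number of distinct prime factors
#   mobius(30) == -1  # 30 = 2 * 3 * 5 has an odd number of distinct prime factors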
| 112
| 1
|
import math
def fx(x: float, a: float) -> float:
    return math.pow(x, 2) - a


def fx_derivative(x: float) -> float:
    return 2 * x


def get_initial_point(a: float) -> float:
    start = 2.0
    while start <= a:
        start = math.pow(start, 2)
    return start


def square_root_iterative(a: float, max_iter: int = 9999, tolerance: float = 0.00000000000001) -> float:
    """Approximate sqrt(a) with the Newton-Raphson method on f(x) = x^2 - a."""
    if a < 0:
        raise ValueError("math domain error")

    value = get_initial_point(a)
    for _ in range(max_iter):
        prev_value = value
        value = value - fx(value, a) / fx_derivative(value)
        if abs(prev_value - value) < tolerance:
            return value
    return value
if __name__ == "__main__":
from doctest import testmod
testmod()
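
# Illustrative values (assuming the functions above):
#   square_root_iterative(4) -> 2.0 (up to the tolerance)
#   square_root_iterative(3.2) -> 1.788854...
#   square_root_iterative(-1) raises ValueError("math domain error")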
| 140
|
import argparse
import math
import traceback
import dateutil.parser as date_parser
import requests
def extract_time_from_single_job(job):
    """Extract start/end timestamps and the duration (minutes) of a single job."""
    job_info = {}

    start = job["started_at"]
    end = job["completed_at"]

    start_datetime = date_parser.parse(start)
    end_datetime = date_parser.parse(end)

    duration_in_min = round((end_datetime - start_datetime).total_seconds() / 60.0)
    job_info["started_at"] = start
    job_info["completed_at"] = end
    job_info["duration"] = duration_in_min

    return job_info


def get_job_time(workflow_run_id, token=None):
    """Extract time info for all jobs in a GitHub Actions workflow run."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
    result = requests.get(url, headers=headers).json()
    job_time = {}

    try:
        job_time.update({job["name"]: extract_time_from_single_job(job) for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)
        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            job_time.update({job["name"]: extract_time_from_single_job(job) for job in result["jobs"]})
        return job_time
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")

    return {}
if __name__ == "__main__":
A : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
A : Optional[Any] = parser.parse_args()
A : Optional[int] = get_job_time(args.workflow_run_id)
A : Tuple = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True))
for k, v in job_time.items():
print(f"{k}: {v['duration']}")
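
# Shape of the dictionary printed above (values illustrative, not real run data):
# {"run_tests_torch": {"started_at": "...", "completed_at": "...", "duration": 91}, ...}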
| 140
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
    "configuration_resnet": ["RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "ResNetConfig", "ResNetOnnxConfig"]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_resnet"] = [
        "RESNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ResNetForImageClassification",
        "ResNetModel",
        "ResNetPreTrainedModel",
        "ResNetBackbone",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_resnet"] = [
        "TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFResNetForImageClassification",
        "TFResNetModel",
        "TFResNetPreTrainedModel",
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_resnet"] = [
        "FlaxResNetForImageClassification",
        "FlaxResNetModel",
        "FlaxResNetPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_resnet import (
RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
ResNetBackbone,
ResNetForImageClassification,
ResNetModel,
ResNetPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_resnet import (
TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFResNetForImageClassification,
TFResNetModel,
TFResNetPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
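
# The `_LazyModule` pattern replaces this module in `sys.modules` with a proxy
# that resolves the names listed in `_import_structure` on first attribute
# access, so the heavy torch/TF/Flax imports above only happen when needed.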
| 253
|
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LayoutLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LayoutLMTokenizer
    rust_tokenizer_class = LayoutLMTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        return LayoutLMTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])

    def test_special_tokens_as_you_like(self):
        pass
| 253
| 1
|
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from eli5_utils import (
    embed_questions_for_retrieval,
    make_qa_s2s_model,
    qa_s2s_generate,
    query_es_index,
    query_qa_dense_index,
)

import transformers
from transformers import AutoModel, AutoModelForSeq2SeqLM, AutoTokenizer
__A = "bart"
__A = True
@st.cache(allow_output_mutation=True)
def load_models():
    if LOAD_DENSE_INDEX:
        qar_tokenizer = AutoTokenizer.from_pretrained("yjernite/retribert-base-uncased")
        qar_model = AutoModel.from_pretrained("yjernite/retribert-base-uncased").to("cuda:0")
        qar_model = qar_model.eval()
    else:
        qar_tokenizer, qar_model = (None, None)
    if MODEL_TYPE == "bart":
        sas_tokenizer = AutoTokenizer.from_pretrained("yjernite/bart_eli5")
        sas_model = AutoModelForSeq2SeqLM.from_pretrained("yjernite/bart_eli5").to("cuda:0")
        save_dict = torch.load("seq2seq_models/eli5_bart_model_blm_2.pth")
        sas_model.load_state_dict(save_dict["model"])
        sas_model = sas_model.eval()
    else:
        sas_tokenizer, sas_model = make_qa_s2s_model(
            model_name="t5-small", from_file="seq2seq_models/eli5_t5_model_1024_4.pth", device="cuda:0"
        )
    return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
@st.cache(allow_output_mutation=True)
def load_indexes():
    if LOAD_DENSE_INDEX:
        faiss_res = faiss.StandardGpuResources()
        wiki40b_passages = datasets.load_dataset(path="wiki_snippets", name="wiki40b_en_100_0")["train"]
        wiki40b_passage_reps = np.memmap(
            "wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat",
            dtype="float32",
            mode="r",
            shape=(wiki40b_passages.num_rows, 128),
        )
        wiki40b_index_flat = faiss.IndexFlatIP(128)
        wiki40b_gpu_index_flat = faiss.index_cpu_to_gpu(faiss_res, 1, wiki40b_index_flat)
        wiki40b_gpu_index_flat.add(wiki40b_passage_reps)  # TODO fix for larger GPU
    else:
        wiki40b_passages, wiki40b_gpu_index_flat = (None, None)
    es_client = Elasticsearch([{"host": "localhost", "port": "9200"}])
    return (wiki40b_passages, wiki40b_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=True)
def load_train_data():
    eli5 = datasets.load_dataset("eli5", name="LFQA_reddit")
    eli5_train = eli5["train_eli5"]
    eli5_train_q_reps = np.memmap(
        "eli5_questions_reps.dat", dtype="float32", mode="r", shape=(eli5_train.num_rows, 128)
    )
    eli5_train_q_index = faiss.IndexFlatIP(128)
    eli5_train_q_index.add(eli5_train_q_reps)
    return (eli5_train, eli5_train_q_index)


wiki40b_passages, wiki40b_gpu_index_flat, es_client = load_indexes()
qar_tokenizer, qar_model, sas_tokenizer, sas_model = load_models()
eli5_train, eli5_train_q_index = load_train_data()
def find_nearest_training(question, n_results=10):
    q_rep = embed_questions_for_retrieval([question], qar_tokenizer, qar_model)
    D, I = eli5_train_q_index.search(q_rep, n_results)
    nn_examples = [eli5_train[int(i)] for i in I[0]]
    return nn_examples
def make_support(question, source="wiki40b", method="dense", n_results=10):
    if source == "none":
        support_doc, hit_lst = (" <P> ".join(["" for _ in range(11)]).strip(), [])
    else:
        if method == "dense":
            support_doc, hit_lst = query_qa_dense_index(
                question, qar_model, qar_tokenizer, wiki40b_passages, wiki40b_gpu_index_flat, n_results
            )
        else:
            support_doc, hit_lst = query_es_index(
                question, es_client, index_name="english_wiki40b_snippets_100w", n_results=n_results,
            )
    support_list = [
        (res["article_title"], res["section_title"].strip(), res["score"], res["passage_text"]) for res in hit_lst
    ]
    question_doc = "question: {} context: {}".format(question, support_doc)
    return question_doc, support_list
@st.cache(
    hash_funcs={
        torch.Tensor: (lambda _: None),
        transformers.models.bart.tokenization_bart.BartTokenizer: (lambda _: None),
    }
)
def answer_question(question_doc, sas_model, sas_tokenizer, min_len=64, max_len=256, sampling=False, n_beams=2, top_p=0.95, temp=0.8):
    with torch.no_grad():
        answer = qa_s2s_generate(
            question_doc, sas_model, sas_tokenizer, num_answers=1, num_beams=n_beams, min_len=min_len,
            max_len=max_len, do_sample=sampling, temp=temp, top_p=top_p, top_k=None, max_input_length=1024,
            device="cuda:0",
        )[0]
    return (answer, support_list)
st.title("Long Form Question Answering with ELI5")
# Start sidebar
__A = "<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>"
__A = "\n<html>\n <head>\n <style>\n .img-container {\n padding-left: 90px;\n padding-right: 90px;\n padding-top: 50px;\n padding-bottom: 50px;\n background-color: #f0f3f9;\n }\n </style>\n </head>\n <body>\n <span class=\"img-container\"> <!-- Inline parent element -->\n %s\n </span>\n </body>\n</html>\n" % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
__A = "\nThis demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).\nFirst, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,\na pre-processed fixed snapshot of Wikipedia.\n"
st.sidebar.markdown(description, unsafe_allow_html=True)
action_list = [
    "Answer the question",
    "View the retrieved document only",
    "View the most similar ELI5 question and answer",
    "Show me everything, please!",
]
demo_options = st.sidebar.checkbox("Demo options")
if demo_options:
    action_st = st.sidebar.selectbox(
        "",
        action_list,
        index=3,
    )
    action = action_list.index(action_st)
    show_type = st.sidebar.selectbox(
        "",
        ["Show full text of passages", "Show passage section titles"],
        index=0,
    )
    show_passages = show_type == "Show full text of passages"
else:
    action = 3
    show_passages = True
__A = st.sidebar.checkbox("Retrieval options")
if retrieval_options:
__A = "\n ### Information retriever options\n\n The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding\n trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.\n The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.\n "
st.sidebar.markdown(retriever_info)
__A = st.sidebar.selectbox("Which Wikipedia format should the model use?", ["wiki40b", "none"])
__A = st.sidebar.selectbox("Which Wikipedia indexer should the model use?", ["dense", "sparse", "mixed"])
else:
__A = "wiki40b"
__A = "dense"
__A = "beam"
__A = 2
__A = 64
__A = 2_56
__A = None
__A = None
__A = st.sidebar.checkbox("Generation options")
if generate_options:
__A = "\n ### Answer generation options\n\n The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)\n weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with\n **beam** search, or **sample** from the decoder's output probabilities.\n "
st.sidebar.markdown(generate_info)
__A = st.sidebar.selectbox("Would you like to use beam search or sample an answer?", ["beam", "sampled"])
__A = st.sidebar.slider(
"Minimum generation length", min_value=8, max_value=2_56, value=64, step=8, format=None, key=None
)
__A = st.sidebar.slider(
"Maximum generation length", min_value=64, max_value=5_12, value=2_56, step=16, format=None, key=None
)
if sampled == "beam":
__A = st.sidebar.slider("Beam size", min_value=1, max_value=8, value=2, step=None, format=None, key=None)
else:
__A = st.sidebar.slider(
"Nucleus sampling p", min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
)
__A = st.sidebar.slider(
"Temperature", min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
)
__A = None
# start main text
questions_list = [
"<MY QUESTION>",
"How do people make chocolate?",
"Why do we get a fever when we are sick?",
"How can different animals perceive different colors?",
"What is natural language processing?",
"What's the best way to treat a sunburn?",
"What exactly are vitamins ?",
"How does nuclear energy provide electricity?",
"What's the difference between viruses and bacteria?",
"Why are flutes classified as woodwinds when most of them are made out of metal ?",
"Why do people like drinking coffee even though it tastes so bad?",
"What happens when wine ages? How does it make the wine taste better?",
"If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?",
"How can we set a date to the beginning or end of an artistic period? Doesn't the change happen gradually?",
"How does New Zealand have so many large bird predators?",
]
question_s = st.selectbox(
    "What would you like to ask? ---- select <MY QUESTION> to enter a new query",
    questions_list,
    index=1,
)
if question_s == "<MY QUESTION>":
    question = st.text_input("Enter your question here:", "")
else:
    question = question_s
if st.button("Show me!"):
if action in [0, 1, 3]:
if index_type == "mixed":
__A , __A = make_support(question, source=wiki_source, method="dense", n_results=10)
__A , __A = make_support(question, source=wiki_source, method="sparse", n_results=10)
__A = []
for res_d, res_s in zip(support_list_dense, support_list_sparse):
if tuple(res_d) not in support_list:
support_list += [tuple(res_d)]
if tuple(res_s) not in support_list:
support_list += [tuple(res_s)]
__A = support_list[:10]
__A = "<P> " + " <P> ".join([res[-1] for res in support_list])
else:
__A , __A = make_support(question, source=wiki_source, method=index_type, n_results=10)
if action in [0, 3]:
        answer, support_list = answer_question(
question_doc,
sas_model,
sas_tokenizer,
min_len=min_len,
max_len=int(max_len),
sampling=(sampled == "sampled"),
n_beams=n_beams,
top_p=top_p,
temp=temp,
)
st.markdown("### The model generated answer is:")
st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
st.markdown("--- \n ### The model is drawing information from the following Wikipedia passages:")
for i, res in enumerate(support_list):
__A = "https://en.wikipedia.org/wiki/{}".format(res[0].replace(" ", "_"))
__A = res[1].strip()
if sec_titles == "":
__A = "[{}]({})".format(res[0], wiki_url)
else:
__A = sec_titles.split(" & ")
__A = " & ".join(
["[{}]({}#{})".format(sec.strip(), wiki_url, sec.strip().replace(" ", "_")) for sec in sec_list]
)
st.markdown(
"{0:02d} - **Article**: {1:<18} <br> _Section_: {2}".format(i + 1, res[0], sections),
unsafe_allow_html=True,
)
if show_passages:
st.write(
"> <span style=\"font-family:arial; font-size:10pt;\">" + res[-1] + "</span>", unsafe_allow_html=True
)
if action in [2, 3]:
        nn_train_list = find_nearest_training(question)
        train_exple = nn_train_list[0]
st.markdown(
"--- \n ### The most similar question in the ELI5 training set was: \n\n {}".format(train_exple["title"])
)
    answers_st = [
"{}. {}".format(i + 1, " \n".join([line.strip() for line in ans.split("\n") if line.strip() != ""]))
for i, (ans, sc) in enumerate(zip(train_exple["answers"]["text"], train_exple["answers"]["score"]))
if i == 0 or sc > 2
]
st.markdown("##### Its answers were: \n\n {}".format("\n".join(answers_st)))
__A = "\n---\n\n**Disclaimer**\n\n*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.\nEvaluating biases of such a model and ensuring factual generations are still very much open research problems.\nTherefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*\n"
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
| 68
|
def odd_even_sort(input_list: list) -> list:
    """Sort the list in place using odd-even transposition (brick) sort."""
    is_sorted = False
    while is_sorted is False:  # Until all the indices are traversed keep looping
        is_sorted = True
        for i in range(0, len(input_list) - 1, 2):  # iterating over all even indices
            if input_list[i] > input_list[i + 1]:
                # swapping if elements not in order
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                is_sorted = False
        for i in range(1, len(input_list) - 1, 2):  # iterating over all odd indices
            if input_list[i] > input_list[i + 1]:
                # swapping if elements not in order
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                is_sorted = False
    return input_list


if __name__ == "__main__":
    print("Enter list to be sorted")
    input_list = [int(x) for x in input().split()]  # inputting elements of the list in one line
    sorted_list = odd_even_sort(input_list)
    print("The sorted list is")
    print(sorted_list)
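
# Each pass compares (even, even+1) pairs and then (odd, odd+1) pairs, so a
# single pass can be parallelised; the sort still needs O(n^2) comparisons in
# the worst case. Quick sanity check:
#   odd_even_sort([5, 3, 1, 4, 2]) -> [1, 2, 3, 4, 5]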
| 456
| 0
|
from __future__ import annotations
def bucket_sort(my_list: list) -> list:
    """Sort ``my_list`` by distributing values into per-value buckets."""
    if len(my_list) == 0:
        return []
    min_value, max_value = min(my_list), max(my_list)
    bucket_count = int(max_value - min_value) + 1
    buckets: list[list] = [[] for _ in range(bucket_count)]

    for i in my_list:
        buckets[int(i - min_value)].append(i)

    return [v for bucket in buckets for v in sorted(bucket)]
if __name__ == "__main__":
from doctest import testmod
testmod()
assert bucket_sort([4, 5, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bucket_sort([0, 1, -10, 15, 2, -2]) == [-10, -2, 0, 1, 2, 15]
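
# Note: with one bucket per integer value in [min, max], this behaves like a
# counting sort for integers; memory grows with the value range, and each
# bucket is finished off with the built-in Timsort via `sorted`.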
| 96
|
def euclidean_gcd(a: int, b: int) -> int:
    """Iterative Euclidean algorithm for the greatest common divisor."""
    while b:
        a, b = b, a % b
    return a


def euclidean_gcd_recursive(a: int, b: int) -> int:
    """Recursive Euclidean algorithm for the greatest common divisor."""
    return a if b == 0 else euclidean_gcd_recursive(b, a % b)


def main() -> None:
print(f"euclidean_gcd(3, 5) = {euclidean_gcd(3 , 5 )}" )
print(f"euclidean_gcd(5, 3) = {euclidean_gcd(5 , 3 )}" )
print(f"euclidean_gcd(1, 3) = {euclidean_gcd(1 , 3 )}" )
print(f"euclidean_gcd(3, 6) = {euclidean_gcd(3 , 6 )}" )
print(f"euclidean_gcd(6, 3) = {euclidean_gcd(6 , 3 )}" )
print(f"euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3 , 5 )}" )
print(f"euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5 , 3 )}" )
print(f"euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1 , 3 )}" )
print(f"euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3 , 6 )}" )
print(f"euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6 , 3 )}" )
if __name__ == "__main__":
main()
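
# Both variants run in O(log(min(a, b))) iterations; e.g. euclidean_gcd(48, 18)
# and euclidean_gcd_recursive(48, 18) both return 6.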
| 96
| 1
|
'''simple docstring'''
from collections.abc import Sequence
def max_subsequence_sum(nums=None):
    """Return the maximum possible sum over all non-empty subsequences of ``nums``."""
    if nums is None or not nums:
        raise ValueError("Input sequence should not be empty")

    ans = nums[0]
    for i in range(1, len(nums)):
        num = nums[i]
        ans = max(ans, ans + num, num)

    return ans
if __name__ == "__main__":
import doctest
doctest.testmod()
# Try on a sample input from the user
    n = int(input("Enter number of elements : ").strip())
    array = list(map(int, input("\nEnter the numbers : ").strip().split()))[:n]
print(max_subsequence_sum(array))
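
# Because any subsequence (not necessarily contiguous) is allowed, the
# recurrence effectively keeps every positive element, e.g.
#   max_subsequence_sum([-2, 1, -3, 4, -1, 2, 1, -5, 4]) == 12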
| 11
|
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import RoFormerConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerModel,
)
from transformers.models.roformer.modeling_tf_roformer import (
TFRoFormerSelfAttention,
TFRoFormerSinusoidalPositionalEmbedding,
)
class TFRoFormerModelTester:
    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = RoFormerConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            return_dict=True,
        )

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFRoFormerModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}

        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_lm_head(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.is_decoder = True
        model = TFRoFormerForCausalLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        prediction_scores = model(inputs)["logits"]
        self.parent.assertListEqual(
            list(prediction_scores.numpy().shape), [self.batch_size, self.seq_length, self.vocab_size]
        )

    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFRoFormerForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFRoFormerForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = TFRoFormerForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFRoFormerForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFRoFormerForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFRoFormerModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFRoFormerModel,
            TFRoFormerForCausalLM,
            TFRoFormerForMaskedLM,
            TFRoFormerForQuestionAnswering,
            TFRoFormerForSequenceClassification,
            TFRoFormerForTokenClassification,
            TFRoFormerForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFRoFormerModel,
            "fill-mask": TFRoFormerForMaskedLM,
            "question-answering": TFRoFormerForQuestionAnswering,
            "text-classification": TFRoFormerForSequenceClassification,
            "text-generation": TFRoFormerForCausalLM,
            "token-classification": TFRoFormerForTokenClassification,
            "zero-shot": TFRoFormerForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )

    test_head_masking = False
    test_onnx = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "TextGenerationPipelineTests":
            return True

        return False
    def setUp(self):
        self.model_tester = TFRoFormerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RoFormerConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = TFRoFormerModel.from_pretrained("junnyu/roformer_chinese_base")
        self.assertIsNotNone(model)
@require_tf
class TFRoFormerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        # TODO Replace vocab size
        vocab_size = 50000

        expected_shape = [1, 6, vocab_size]
        self.assertEqual(output.shape, expected_shape)

        print(output[:, :3, :3])

        # TODO Replace values below with what was printed above.
        expected_slice = tf.constant(
            [
                [
                    [-0.12053341, -1.0264901, 0.29221946],
                    [-1.5133783, 0.197433, 0.15190607],
                    [-5.0135403, -3.900256, -0.84038764],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
@require_tf
class TFRoFormerSinusoidalPositionalEmbeddingTest(unittest.TestCase):
    tolerance = 1e-4

    def test_basic(self):
        input_ids = tf.constant([[4, 10]])
        emb1 = TFRoFormerSinusoidalPositionalEmbedding(num_positions=6, embedding_dim=6)

        emb = emb1(input_ids.shape)
        desired_weights = tf.constant(
            [[0.0000, 0.0000, 0.0000, 1.0000, 1.0000, 1.0000], [0.8415, 0.0464, 0.0022, 0.5403, 0.9989, 1.0000]]
        )

        tf.debugging.assert_near(emb, desired_weights, atol=self.tolerance)

    def test_positional_emb_weights_against_roformer(self):
        desired_weights = tf.constant(
            [
                [0.0000, 0.0000, 0.0000, 0.0000, 0.0000],
                [0.8415, 0.8219, 0.8020, 0.7819, 0.7617],
                [0.9093, 0.9364, 0.9581, 0.9749, 0.9870],
            ]
        )
        emba = TFRoFormerSinusoidalPositionalEmbedding(num_positions=512, embedding_dim=512)
        emba([2, 16, 512])
        weights = emba.weight[:3, :5]

        tf.debugging.assert_near(weights, desired_weights, atol=self.tolerance)
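
# Note: the sinusoidal weights checked above are sin(pos / 10000^(2i/d)) in the
# first half of each row and the matching cos values in the second half, which is
# why sin(1) ≈ 0.8415 and cos(1) ≈ 0.5403 both appear in the row for position 1.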
@require_tf
class TFRoFormerSelfAttentionRotaryPositionEmbeddingTest(unittest.TestCase):
    tolerance = 1e-4

    def test_apply_rotary_position_embeddings(self):
        query_layer = tf.reshape(tf.range(2 * 12 * 16 * 64, dtype=tf.float32), shape=(2, 12, 16, 64)) / 100

        key_layer = -tf.reshape(tf.range(2 * 12 * 16 * 64, dtype=tf.float32), shape=(2, 12, 16, 64)) / 100

        embed_positions = TFRoFormerSinusoidalPositionalEmbedding(num_positions=32, embedding_dim=64)
        sinusoidal_pos = embed_positions([2, 16, 768])[None, None, :, :]

        query_layer, key_layer = TFRoFormerSelfAttention.apply_rotary_position_embeddings(
            sinusoidal_pos, query_layer, key_layer
        )

        expected_query_layer = tf.constant(
            [
                [0.0000, 0.0100, 0.0200, 0.0300, 0.0400, 0.0500, 0.0600, 0.0700],
                [-0.2012, 0.8897, 0.0263, 0.9401, 0.2074, 0.9463, 0.3481, 0.9343],
                [-1.7057, 0.6271, -1.2145, 1.3897, -0.6303, 1.7647, -0.1173, 1.8985],
                [-2.1731, -1.6397, -2.7358, 0.2854, -2.1840, 1.7183, -1.3018, 2.4871],
                [0.2717, -3.6173, -2.9206, -2.1988, -3.6638, 0.3858, -2.9155, 2.2980],
                [3.9859, -2.1580, -0.7984, -4.4904, -4.1181, -2.0252, -4.4782, 1.1253],
            ]
        )
        expected_key_layer = tf.constant(
            [
                [0.0000, -0.0100, -0.0200, -0.0300, -0.0400, -0.0500, -0.0600, -0.0700],
                [0.2012, -0.8897, -0.0263, -0.9401, -0.2074, -0.9463, -0.3481, -0.9343],
                [1.7057, -0.6271, 1.2145, -1.3897, 0.6303, -1.7647, 0.1173, -1.8985],
                [2.1731, 1.6397, 2.7358, -0.2854, 2.1840, -1.7183, 1.3018, -2.4871],
                [-0.2717, 3.6173, 2.9206, 2.1988, 3.6638, -0.3858, 2.9155, -2.2980],
                [-3.9859, 2.1580, 0.7984, 4.4904, 4.1181, 2.0252, 4.4782, -1.1253],
            ]
        )

        tf.debugging.assert_near(query_layer[0, 0, :6, :8], expected_query_layer, atol=self.tolerance)
        tf.debugging.assert_near(key_layer[0, 0, :6, :8], expected_key_layer, atol=self.tolerance)
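
# For reference, rotary position embeddings rotate each adjacent (even, odd)
# feature pair by a position-dependent angle, so relative offsets show up as
# phase differences in attention scores. A minimal standalone NumPy sketch of the
# same pairwise rotation (helper names here are illustrative, not the
# transformers API):
#
#     import numpy as np
#
#     def apply_rotary(x, sin, cos):
#         # x: (seq_len, dim) with dim even; sin/cos: (seq_len, dim // 2)
#         x1, x2 = x[:, 0::2], x[:, 1::2]
#         out = np.empty_like(x)
#         out[:, 0::2] = x1 * cos - x2 * sin
#         out[:, 1::2] = x1 * sin + x2 * cos
#         return out
#
#     pos = np.arange(16)[:, None]
#     inv_freq = 1.0 / (10000 ** (np.arange(0, 64, 2) / 64))
#     angles = pos * inv_freq                     # (16, 32)
#     q = apply_rotary(np.random.randn(16, 64), np.sin(angles), np.cos(angles))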
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation
def get_upernet_config(model_name):
    auxiliary_in_channels = 384
    window_size = 7
    if "tiny" in model_name:
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif "small" in model_name:
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif "base" in model_name:
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
        window_size = 12
        auxiliary_in_channels = 512
    elif "large" in model_name:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
        window_size = 12
        auxiliary_in_channels = 768

    # set label information
    num_labels = 150
    repo_id = "huggingface/label-files"
    filename = "ade20k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    backbone_config = SwinConfig(
        embed_dim=embed_dim,
        depths=depths,
        num_heads=num_heads,
        window_size=window_size,
        out_features=["stage1", "stage2", "stage3", "stage4"],
    )
    config = UperNetConfig(
        backbone_config=backbone_config,
        auxiliary_in_channels=auxiliary_in_channels,
        num_labels=num_labels,
        id2label=id2label,
        label2id=label2id,
    )

    return config
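
# Quick sanity check (commented out, since building the config downloads the
# ADE20k label file from the Hub):
#   config = get_upernet_config("upernet-swin-tiny")
#   assert tuple(config.backbone_config.depths) == (2, 2, 6, 2)
#   assert config.num_labels == 150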
def create_rename_keys(config):
    rename_keys = []

    # fmt: off
    # stem
    rename_keys.append(("backbone.patch_embed.projection.weight", "backbone.embeddings.patch_embeddings.projection.weight"))
    rename_keys.append(("backbone.patch_embed.projection.bias", "backbone.embeddings.patch_embeddings.projection.bias"))
    rename_keys.append(("backbone.patch_embed.norm.weight", "backbone.embeddings.norm.weight"))
    rename_keys.append(("backbone.patch_embed.norm.bias", "backbone.embeddings.norm.bias"))
    # stages
    for i in range(len(config.backbone_config.depths)):
        for j in range(config.backbone_config.depths[i]):
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.norm1.weight", f"backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.norm1.bias", f"backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table", f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index", f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight", f"backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias", f"backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.norm2.weight", f"backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.norm2.bias", f"backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight", f"backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias", f"backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight", f"backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias", f"backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias"))

        if i < 3:
            rename_keys.append((f"backbone.stages.{i}.downsample.reduction.weight", f"backbone.encoder.layers.{i}.downsample.reduction.weight"))
            rename_keys.append((f"backbone.stages.{i}.downsample.norm.weight", f"backbone.encoder.layers.{i}.downsample.norm.weight"))
            rename_keys.append((f"backbone.stages.{i}.downsample.norm.bias", f"backbone.encoder.layers.{i}.downsample.norm.bias"))
        rename_keys.append((f"backbone.norm{i}.weight", f"backbone.hidden_states_norms.stage{i+1}.weight"))
        rename_keys.append((f"backbone.norm{i}.bias", f"backbone.hidden_states_norms.stage{i+1}.bias"))

    # decode head
    rename_keys.extend(
        [
            ("decode_head.conv_seg.weight", "decode_head.classifier.weight"),
            ("decode_head.conv_seg.bias", "decode_head.classifier.bias"),
            ("auxiliary_head.conv_seg.weight", "auxiliary_head.classifier.weight"),
            ("auxiliary_head.conv_seg.bias", "auxiliary_head.classifier.bias"),
        ]
    )
    # fmt: on

    return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
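
# e.g. rename_key(state_dict, "old", "new") moves the value in place, so the
# entry is afterwards reachable only under the key "new".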
def read_in_q_k_v(state_dict, backbone_config):
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f"backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight")
            in_proj_bias = state_dict.pop(f"backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias")
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"] = in_proj_weight[:dim, :]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"] = in_proj_bias[:dim]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"] = in_proj_weight[
                dim : dim * 2, :
            ]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"] = in_proj_bias[
                dim : dim * 2
            ]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"] = in_proj_weight[
                -dim:, :
            ]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"] = in_proj_bias[-dim:]
            # fmt: on
def correct_unfold_reduction_order(x):
    out_channel, in_channel = x.shape
    x = x.reshape(out_channel, 4, in_channel // 4)
    x = x[:, [0, 2, 1, 3], :].transpose(1, 2).reshape(out_channel, in_channel)
    return x


def reverse_correct_unfold_reduction_order(x):
    out_channel, in_channel = x.shape
    x = x.reshape(out_channel, in_channel // 4, 4)
    x = x[:, :, [0, 2, 1, 3]].transpose(1, 2).reshape(out_channel, in_channel)
    return x


def correct_unfold_norm_order(x):
    in_channel = x.shape[0]
    x = x.reshape(4, in_channel // 4)
    x = x[[0, 2, 1, 3], :].transpose(0, 1).reshape(in_channel)
    return x


def reverse_correct_unfold_norm_order(x):
    in_channel = x.shape[0]
    x = x.reshape(in_channel // 4, 4)
    x = x[:, [0, 2, 1, 3]].transpose(0, 1).reshape(in_channel)
    return x
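
# The Swin patch-merging ("unfold") weights use a different channel interleaving
# in the original mmsegmentation checkpoints than in the HF implementation; the
# four helpers above permute between the two orders. The permutation [0, 2, 1, 3]
# is an involution, so correct_* and reverse_correct_* undo each other:
x_check = torch.arange(24.0).reshape(2, 12)
assert torch.equal(reverse_correct_unfold_reduction_order(correct_unfold_reduction_order(x_check)), x_check)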
def convert_upernet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    model_name_to_url = {
        "upernet-swin-tiny": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth",
        "upernet-swin-small": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth",
        "upernet-swin-base": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth",
        "upernet-swin-large": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth",
    }
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu", file_name=model_name)[
        "state_dict"
    ]

    for name, param in state_dict.items():
        print(name, param.shape)

    config = get_upernet_config(model_name)
    model = UperNetForSemanticSegmentation(config)
    model.eval()

    # replace "bn" => "batch_norm"
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        if "bn" in key:
            key = key.replace("bn", "batch_norm")
        state_dict[key] = val

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config.backbone_config)

    # fix downsample parameters
    for key, value in state_dict.items():
        if "downsample" in key:
            if "reduction" in key:
                state_dict[key] = reverse_correct_unfold_reduction_order(value)
            if "norm" in key:
                state_dict[key] = reverse_correct_unfold_norm_order(value)

    model.load_state_dict(state_dict)

    # verify on image
    url = "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")

    processor = SegformerImageProcessor()
    pixel_values = processor(image, return_tensors="pt").pixel_values

    with torch.no_grad():
        outputs = model(pixel_values)
        logits = outputs.logits

    print(logits.shape)
    print("First values of logits:", logits[0, 0, :3, :3])
    # assert values
    if model_name == "upernet-swin-tiny":
        expected_slice = torch.tensor(
            [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]]
        )
    elif model_name == "upernet-swin-small":
        expected_slice = torch.tensor(
            [[-7.1921, -7.1921, -6.9532], [-7.1921, -7.1921, -6.9532], [-7.0908, -7.0908, -6.8534]]
        )
    elif model_name == "upernet-swin-base":
        expected_slice = torch.tensor(
            [[-6.5851, -6.5851, -6.4330], [-6.5851, -6.5851, -6.4330], [-6.4763, -6.4763, -6.3254]]
        )
    elif model_name == "upernet-swin-large":
        expected_slice = torch.tensor(
            [[-7.5297, -7.5297, -7.3802], [-7.5297, -7.5297, -7.3802], [-7.4044, -7.4044, -7.2586]]
        )
    print("Logits:", outputs.logits[0, 0, :3, :3])
    assert torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor for {model_name} to hub")
        model.push_to_hub(f"openmmlab/{model_name}")
        processor.push_to_hub(f"openmmlab/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="upernet-swin-tiny",
        type=str,
        choices=[f"upernet-swin-{size}" for size in ["tiny", "small", "base", "large"]],
        help="Name of the Swin + UperNet model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )

    args = parser.parse_args()
    convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
from typing import Any
import numpy as np
def is_hermitian(matrix: np.ndarray) -> bool:
    """Checks whether a matrix equals its own conjugate transpose."""
    return np.array_equal(matrix, matrix.conjugate().T)


def rayleigh_quotient(a: np.ndarray, v: np.ndarray) -> Any:
    """Returns the Rayleigh quotient (v* A v) / (v* v) of a Hermitian matrix A and vector v."""
    v_star = v.conjugate().T
    v_star_dot = v_star.dot(a)
    assert isinstance(v_star_dot, np.ndarray)
    return (v_star_dot.dot(v)) / (v_star.dot(v))


def tests() -> None:
    a = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]])
    v = np.array([[1], [2], [3]])
    assert is_hermitian(a), f"{a} is not hermitian."
    print(rayleigh_quotient(a, v))

    a = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]])
    assert is_hermitian(a), f"{a} is not hermitian."
    assert rayleigh_quotient(a, v) == float(3)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    tests()
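
    # The Rayleigh quotient of a Hermitian matrix is always real and bounded by
    # its extreme eigenvalues; a quick numerical check:
    a = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]])
    v = np.array([[1.0], [1.0], [1.0]])
    r = rayleigh_quotient(a, v).item()
    eigs = np.linalg.eigvalsh(a)
    assert eigs[0] - 1e-9 <= r.real <= eigs[-1] + 1e-9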
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DeformableDetrImageProcessor
class DeformableDetrImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }
    def get_expected_values(self, image_inputs, batched=False):
        """
        Computes the expected height and width when providing images to the image processor,
        assuming do_resize is set to True with a scalar size.
        """
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
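
# The resizing rule above mirrors DETR-style preprocessing: scale the shorter
# side to size["shortest_edge"] while preserving the aspect ratio (the processor
# itself additionally caps the longer side at size["longest_edge"]). E.g. a
# 640x480 image with shortest_edge=18 becomes 24x18, since 18 * 640 / 480 = 24.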
@require_torch
@require_vision
class DeformableDetrImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DeformableDetrImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DeformableDetrImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "size"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False
        )
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)
    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"image_id": 39769, "annotations": target}

        # encode them
        image_processing = DeformableDetrImageProcessor()
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}

        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = DeformableDetrImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
import os
from bleurt import score # From: git+https://github.com/google-research/bleurt.git
import datasets
logger = datasets.logging.get_logger(__name__)


_CITATION = """\
@inproceedings{bleurt,
title={BLEURT: Learning Robust Metrics for Text Generation},
author={Thibault Sellam and Dipanjan Das and Ankur P. Parikh},
booktitle={ACL},
year={2020},
url={https://arxiv.org/abs/2004.04696}
}
"""
_DESCRIPTION = """\
BLEURT is a learned evaluation metric for Natural Language Generation. It is built using multiple phases of transfer learning, starting from a pretrained BERT model (Devlin et al. 2018)
and then employing another pre-training phase on synthetic data. Finally, it is trained on WMT human annotations. You may run BLEURT out-of-the-box or fine-tune
it for your specific application (the latter is expected to perform better).
See the project's README at https://github.com/google-research/bleurt#readme for more information.
"""
_KWARGS_DESCRIPTION = """
BLEURT score.
Args:
`predictions` (list of str): prediction/candidate sentences
`references` (list of str): reference sentences
    `checkpoint` BLEURT checkpoint. Will default to `bleurt-base-128` if none is specified.
Returns:
'scores': List of scores.
Examples:
>>> predictions = [\"hello there\", \"general kenobi\"]
>>> references = [\"hello there\", \"general kenobi\"]
>>> bleurt = datasets.load_metric(\"bleurt\")
>>> results = bleurt.compute(predictions=predictions, references=references)
>>> print([round(v, 2) for v in results[\"scores\"]])
[1.03, 1.04]
"""
CHECKPOINT_URLS = {
"""bleurt-tiny-128""": """https://storage.googleapis.com/bleurt-oss/bleurt-tiny-128.zip""",
"""bleurt-tiny-512""": """https://storage.googleapis.com/bleurt-oss/bleurt-tiny-512.zip""",
"""bleurt-base-128""": """https://storage.googleapis.com/bleurt-oss/bleurt-base-128.zip""",
"""bleurt-base-512""": """https://storage.googleapis.com/bleurt-oss/bleurt-base-512.zip""",
"""bleurt-large-128""": """https://storage.googleapis.com/bleurt-oss/bleurt-large-128.zip""",
"""bleurt-large-512""": """https://storage.googleapis.com/bleurt-oss/bleurt-large-512.zip""",
"""BLEURT-20-D3""": """https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D3.zip""",
"""BLEURT-20-D6""": """https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D6.zip""",
"""BLEURT-20-D12""": """https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D12.zip""",
"""BLEURT-20""": """https://storage.googleapis.com/bleurt-oss-21/BLEURT-20.zip""",
}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class BLEURT(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="https://github.com/google-research/bleurt",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/google-research/bleurt"],
            reference_urls=["https://github.com/google-research/bleurt", "https://arxiv.org/abs/2004.04696"],
        )

    def _download_and_prepare(self, dl_manager):
        # check that config name specifies a valid BLEURT model
        if self.config_name == "default":
            logger.warning(
                "Using default BLEURT-Base checkpoint for sequence maximum length 128. "
                "You can use a bigger model for better results with e.g.: datasets.load_metric('bleurt', 'bleurt-large-512')."
            )
            checkpoint_name = "bleurt-base-128"
        elif self.config_name.lower() in CHECKPOINT_URLS:
            checkpoint_name = self.config_name.lower()
        elif self.config_name.upper() in CHECKPOINT_URLS:
            checkpoint_name = self.config_name.upper()
        else:
            raise KeyError(
                f"{self.config_name} model not found. You should supply the name of a model checkpoint for bleurt in {CHECKPOINT_URLS.keys()}"
            )

        # download the model checkpoint specified by self.config_name and set up the scorer
        model_path = dl_manager.download_and_extract(CHECKPOINT_URLS[checkpoint_name])
        self.scorer = score.BleurtScorer(os.path.join(model_path, checkpoint_name))

    def _compute(self, predictions, references):
        scores = self.scorer.score(references=references, candidates=predictions)
        return {"scores": scores}
from dataclasses import dataclass
from typing import Optional
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .modeling_utils import ModelMixin
@dataclass
class TransformerTemporalModelOutput(BaseOutput):
    sample: torch.FloatTensor


class TransformerTemporalModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        num_attention_heads: int = 16,
        attention_head_dim: int = 88,
        in_channels: Optional[int] = None,
        out_channels: Optional[int] = None,
        num_layers: int = 1,
        dropout: float = 0.0,
        norm_num_groups: int = 32,
        cross_attention_dim: Optional[int] = None,
        attention_bias: bool = False,
        sample_size: Optional[int] = None,
        activation_fn: str = "geglu",
        norm_elementwise_affine: bool = True,
        double_self_attention: bool = True,
    ):
        super().__init__()
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        inner_dim = num_attention_heads * attention_head_dim

        self.in_channels = in_channels

        self.norm = torch.nn.GroupNorm(num_groups=norm_num_groups, num_channels=in_channels, eps=1e-6, affine=True)
        self.proj_in = nn.Linear(in_channels, inner_dim)

        # 3. Define transformers blocks
        self.transformer_blocks = nn.ModuleList(
            [
                BasicTransformerBlock(
                    inner_dim,
                    num_attention_heads,
                    attention_head_dim,
                    dropout=dropout,
                    cross_attention_dim=cross_attention_dim,
                    activation_fn=activation_fn,
                    attention_bias=attention_bias,
                    double_self_attention=double_self_attention,
                    norm_elementwise_affine=norm_elementwise_affine,
                )
                for d in range(num_layers)
            ]
        )

        self.proj_out = nn.Linear(inner_dim, in_channels)

    def forward(
        self,
        hidden_states,
        encoder_hidden_states=None,
        timestep=None,
        class_labels=None,
        num_frames=1,
        cross_attention_kwargs=None,
        return_dict: bool = True,
    ):
        # 1. Input
        batch_frames, channel, height, width = hidden_states.shape
        batch_size = batch_frames // num_frames

        residual = hidden_states

        hidden_states = hidden_states[None, :].reshape(batch_size, num_frames, channel, height, width)
        hidden_states = hidden_states.permute(0, 2, 1, 3, 4)

        hidden_states = self.norm(hidden_states)
        hidden_states = hidden_states.permute(0, 3, 4, 2, 1).reshape(batch_size * height * width, num_frames, channel)

        hidden_states = self.proj_in(hidden_states)

        # 2. Blocks
        for block in self.transformer_blocks:
            hidden_states = block(
                hidden_states,
                encoder_hidden_states=encoder_hidden_states,
                timestep=timestep,
                cross_attention_kwargs=cross_attention_kwargs,
                class_labels=class_labels,
            )

        # 3. Output
        hidden_states = self.proj_out(hidden_states)
        hidden_states = (
            hidden_states[None, None, :]
            .reshape(batch_size, height, width, channel, num_frames)
            .permute(0, 3, 4, 1, 2)
            .contiguous()
        )
        hidden_states = hidden_states.reshape(batch_frames, channel, height, width)

        output = hidden_states + residual

        if not return_dict:
            return (output,)

        return TransformerTemporalModelOutput(sample=output)
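
# Shape sanity check (commented out): a temporal transformer treats a
# (batch * num_frames, C, H, W) input as batch*H*W sequences of length
# num_frames over C channels, and returns the same spatial shape:
#   model = TransformerTemporalModel(num_attention_heads=2, attention_head_dim=8, in_channels=16, norm_num_groups=4)
#   out = model(torch.randn(2 * 4, 16, 8, 8), num_frames=4).sample
#   assert out.shape == (8, 16, 8, 8)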
from typing import List, Optional, Tuple, Union
import PIL
import torch
from torchvision import transforms
from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
from diffusers.schedulers import DDIMScheduler
from diffusers.utils import randn_tensor
trans = transforms.Compose(
    [
        transforms.Resize((256, 256)),
        transforms.ToTensor(),
        transforms.Normalize([0.5], [0.5]),
    ]
)
def preprocess(image):
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]

    image = [trans(img.convert("RGB")) for img in image]
    image = torch.stack(image)
    return image
class DDIMNoiseComparativeAnalysisPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()

        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config)

        self.register_modules(unet=unet, scheduler=scheduler)

    def check_inputs(self, strength):
        if strength < 0 or strength > 1:
            raise ValueError(f"The value of strength should in [0.0, 1.0] but is {strength}")

    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)

        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]

        return timesteps, num_inference_steps - t_start

    def prepare_latents(self, image, timestep, batch_size, dtype, device, generator=None):
        if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
            raise ValueError(
                f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
            )

        init_latents = image.to(device=device, dtype=dtype)

        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        shape = init_latents.shape
        noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)

        # get latents
        print("add noise to latents at timestep", timestep)
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents

        return latents

    @torch.no_grad()
    def __call__(
        self,
        image=None,
        strength=0.8,
        batch_size=1,
        generator=None,
        eta=0.0,
        num_inference_steps=50,
        use_clipped_model_output=None,
        output_type="pil",
        return_dict=True,
    ):
        # 1. Check inputs. Raise error if not correct
        self.check_inputs(strength)

        # 2. Preprocess image
        image = preprocess(image)

        # 3. set timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)
        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, self.device)
        latent_timestep = timesteps[:1].repeat(batch_size)

        # 4. Prepare latent variables
        latents = self.prepare_latents(image, latent_timestep, batch_size, self.unet.dtype, self.device, generator)
        image = latents

        # 5. Denoising loop
        for t in self.progress_bar(timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output,
                t,
                image,
                eta=eta,
                use_clipped_model_output=use_clipped_model_output,
                generator=generator,
            ).prev_sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image, latent_timestep.item())

        return ImagePipelineOutput(images=image)
from typing import Optional, Tuple, Union
import torch
from einops import rearrange, reduce
from diffusers import DDIMScheduler, DDPMScheduler, DiffusionPipeline, ImagePipelineOutput, UNet2DConditionModel
from diffusers.schedulers.scheduling_ddim import DDIMSchedulerOutput
from diffusers.schedulers.scheduling_ddpm import DDPMSchedulerOutput
BITS = 8


def decimal_to_bits(x, bits=BITS):
    """Expects an image tensor in [0, 1]; returns a bit tensor with values in {-1, 1}."""
    device = x.device

    x = (x * 255).int().clamp(0, 255)

    mask = 2 ** torch.arange(bits - 1, -1, -1, device=device)
    mask = rearrange(mask, "d -> d 1 1")
    x = rearrange(x, "b c h w -> b c 1 h w")

    bits = ((x & mask) != 0).float()
    bits = rearrange(bits, "b c d h w -> b (c d) h w")
    bits = bits * 2 - 1
    return bits
def bits_to_decimal(x, bits=BITS):
    """Expects bit values in {-1, 1}; returns an image tensor in [0, 1]."""
    device = x.device

    x = (x > 0).int()
    mask = 2 ** torch.arange(bits - 1, -1, -1, device=device, dtype=torch.int32)

    mask = rearrange(mask, "d -> d 1 1")
    x = rearrange(x, "b (c d) h w -> b c d h w", d=8)
    dec = reduce(x * mask, "b c d h w -> b c h w", "sum")
    return (dec / 255).clamp(0.0, 1.0)
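
# Round-trip sanity check for the two helpers above (a minimal sketch, commented
# out): quantizing to 8 bits per channel and decoding recovers the input up to
# 1/255 quantization.
#   x = torch.rand(1, 3, 4, 4)
#   bits = decimal_to_bits(x)      # (1, 3 * BITS, 4, 4), values in {-1.0, +1.0}
#   recon = bits_to_decimal(bits)  # (1, 3, 4, 4), multiples of 1/255
#   assert torch.allclose(recon, (x * 255).int().float() / 255)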
def ddim_bit_scheduler_step(
    self,
    model_output,
    timestep,
    sample,
    eta=0.0,
    use_clipped_model_output=True,
    generator=None,
    return_dict=True,
) -> Union[DDIMSchedulerOutput, Tuple]:
    if self.num_inference_steps is None:
        raise ValueError(
            "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
        )

    # See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf
    # Ideally, read DDIM paper in-detail understanding
    # Notation (<variable name> -> <name in paper>)
    # - pred_noise_t -> e_theta(x_t, t)
    # - pred_original_sample -> f_theta(x_t, t) or x_0
    # - std_dev_t -> sigma_t
    # - eta -> η
    # - pred_sample_direction -> "direction pointing to x_t"
    # - pred_prev_sample -> "x_t-1"

    # 1. get previous step value (=t-1)
    prev_timestep = timestep - self.config.num_train_timesteps // self.num_inference_steps

    # 2. compute alphas, betas
    alpha_prod_t = self.alphas_cumprod[timestep]
    alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod

    beta_prod_t = 1 - alpha_prod_t

    # 3. compute predicted original sample from predicted noise also called
    # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5

    # 4. Clip "predicted x_0"
    scale = self.bit_scale
    if self.config.clip_sample:
        pred_original_sample = torch.clamp(pred_original_sample, -scale, scale)

    # 5. compute variance: "sigma_t(η)" -> see formula (16)
    # σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1)
    variance = self._get_variance(timestep, prev_timestep)
    std_dev_t = eta * variance ** 0.5

    if use_clipped_model_output:
        # the model_output is always re-derived from the clipped x_0 in Glide
        model_output = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5

    # 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    pred_sample_direction = (1 - alpha_prod_t_prev - std_dev_t**2) ** 0.5 * model_output

    # 7. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    prev_sample = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction

    if eta > 0:
        # randn_like does not support generator https://github.com/pytorch/pytorch/issues/27072
        device = model_output.device if torch.is_tensor(model_output) else "cpu"
        noise = torch.randn(model_output.shape, dtype=model_output.dtype, generator=generator).to(device)
        variance = self._get_variance(timestep, prev_timestep) ** 0.5 * eta * noise

        prev_sample = prev_sample + variance

    if not return_dict:
        return (prev_sample,)

    return DDIMSchedulerOutput(prev_sample=prev_sample, pred_original_sample=pred_original_sample)
def snake_case ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_="epsilon" , lowerCAmelCase_=None , lowerCAmelCase_ = True , ) -> Union[DDPMSchedulerOutput, Tuple]:
_snake_case = timestep
if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]:
_snake_case , _snake_case = torch.split(lowerCAmelCase_ , sample.shape[1] , dim=1 )
else:
_snake_case = None
# 1. compute alphas, betas
_snake_case = self.alphas_cumprod[t]
_snake_case = self.alphas_cumprod[t - 1] if t > 0 else self.one
_snake_case = 1 - alpha_prod_t
_snake_case = 1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if prediction_type == "epsilon":
_snake_case = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif prediction_type == "sample":
_snake_case = model_output
else:
raise ValueError(f"""Unsupported prediction_type {prediction_type}.""" )
# 3. Clip "predicted x_0"
_snake_case = self.bit_scale
if self.config.clip_sample:
_snake_case = torch.clamp(lowerCAmelCase_ , -scale , lowerCAmelCase_ )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
_snake_case = (alpha_prod_t_prev ** 0.5 * self.betas[t]) / beta_prod_t
_snake_case = self.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
_snake_case = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
_snake_case = 0
if t > 0:
_snake_case = torch.randn(
model_output.size() , dtype=model_output.dtype , layout=model_output.layout , generator=lowerCAmelCase_ ).to(model_output.device )
_snake_case = (self._get_variance(lowerCAmelCase_ , predicted_variance=lowerCAmelCase_ ) ** 0.5) * noise
_snake_case = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample,)
return DDPMSchedulerOutput(prev_sample=lowerCAmelCase_ , pred_original_sample=lowerCAmelCase_ )
class BitDiffusion(DiffusionPipeline):
    def __init__(
        self,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, DDPMScheduler],
        bit_scale: Optional[float] = 1.0,
    ):
        super().__init__()
        self.bit_scale = bit_scale
        # route scheduler.step through the bit-aware step functions defined above
        self.scheduler.step = (
            ddim_bit_scheduler_step if isinstance(scheduler, DDIMScheduler) else ddpm_bit_scheduler_step
        )

        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        height: Optional[int] = 256,
        width: Optional[int] = 256,
        num_inference_steps: Optional[int] = 50,
        generator: Optional[torch.Generator] = None,
        batch_size: Optional[int] = 1,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ):
        latents = torch.randn(
            (batch_size, self.unet.config.in_channels, height, width),
            generator=generator,
        )
        latents = decimal_to_bits(latents) * self.bit_scale
        latents = latents.to(self.device)

        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # predict the noise residual
            noise_pred = self.unet(latents, t).sample

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents).prev_sample

        image = bits_to_decimal(latents)

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
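
# Hypothetical usage sketch (illustrative names only; no released checkpoint is
# implied):
#   unet = ...        # a UNet whose in/out channels equal 3 * BITS = 24
#   scheduler = DDIMScheduler()
#   pipe = BitDiffusion(unet, scheduler, bit_scale=1.0)
#   image = pipe(height=64, width=64, num_inference_steps=10).images[0]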
def palindromic_string(input_string: str) -> str:
    """
    Manacher's algorithm: finds the longest palindromic substring in linear time.
    """
    max_length = 0

    # if input_string is "aba" than new_input_string become "a|b|a"
    new_input_string = ""
    output_string = ""

    # append each character + "|" in new_string for range(0, length-1)
    for i in input_string[: len(input_string) - 1]:
        new_input_string += i + "|"
    # append last character
    new_input_string += input_string[-1]

    # we will store the starting and ending of previous furthest ending palindromic
    # substring
    l, r = 0, 0  # noqa: E741

    # length[i] shows the length of palindromic substring with center i
    length = [1 for i in range(len(new_input_string))]

    # for each character in new_string find corresponding palindromic string
    start = 0
    for j in range(len(new_input_string)):
        k = 1 if j > r else min(length[l + r - j] // 2, r - j + 1)
        while (
            j - k >= 0
            and j + k < len(new_input_string)
            and new_input_string[k + j] == new_input_string[j - k]
        ):
            k += 1

        length[j] = 2 * k - 1

        # does this string end after the previously explored end (that is r)?
        # if yes, update r to the last index of this palindrome
        if j + k - 1 > r:
            l = j - k + 1  # noqa: E741
            r = j + k - 1

        # update max_length and start position
        if max_length < length[j]:
            max_length = length[j]
            start = j

    # create that string
    s = new_input_string[start - max_length // 2 : start + max_length // 2 + 1]
    for i in s:
        if i != "|":
            output_string += i

    return output_string


if __name__ == "__main__":
    import doctest

    doctest.testmod()
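
    # Example: the longest palindromic substring of "abbbaba" is "abbba".
    print(palindromic_string("abbbaba"))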
def greatest_common_divisor(a: int, b: int) -> int:
    """Calculate the greatest common divisor recursively (Euclid's algorithm)."""
    return abs(b) if a == 0 else greatest_common_divisor(b % a, a)


def gcd_by_iterative(x: int, y: int) -> int:
    """Calculate the greatest common divisor iteratively."""
    while y:  # --> when y=0 then loop will terminate and return x as final GCD.
        x, y = y, x % y
    return abs(x)


def main():
    try:
        nums = input("Enter two integers separated by comma (,): ").split(",")
        num_1 = int(nums[0])
        num_2 = int(nums[1])
        print(
            f"greatest_common_divisor({num_1}, {num_2}) = "
            f"{greatest_common_divisor(num_1, num_2)}"
        )
        print(f"By iterative gcd({num_1}, {num_2}) = {gcd_by_iterative(num_1, num_2)}")
    except (IndexError, UnboundLocalError, ValueError):
        print("Wrong input")


if __name__ == "__main__":
    main()
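
# Example: greatest_common_divisor(54, 24) == gcd_by_iterative(54, 24) == 6.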
import io
import os
import unicodedata
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "sentencepiece_model_ckpt": "sentencepiece.bpe.model"}

RESOURCE_FILES_NAMES = {
    "sentencepiece_model_file": "sentencepiece.bpe.model",
    "vocab_file": "vocab.txt",
}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt",
"ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt",
},
"sentencepiece_model_file": {
"ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model",
"ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "ernie-m-base": 514,
    "ernie-m-large": 514,
}
PRETRAINED_INIT_CONFIGURATION = {
"ernie-m-base": {"do_lower_case": False},
"ernie-m-large": {"do_lower_case": False},
}
class ErnieMTokenizer(PreTrainedTokenizer):
    # Ernie-M model doesn't have token_type embedding.
    model_input_names: List[str] = ["input_ids"]

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    resource_files_names = RESOURCE_FILES_NAMES
    def __init__(
        self,
        sentencepiece_model_ckpt,
        vocab_file=None,
        do_lower_case=False,
        encoding="utf8",
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behave like a normal word, i.e. include the space before it and
        # is included in the raw text, there should be a match in a non-normalized sentence.
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            vocab_file=vocab_file,
            encoding=encoding,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.do_lower_case = do_lower_case
        self.sentencepiece_model_ckpt = sentencepiece_model_ckpt
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(sentencepiece_model_ckpt)

        # to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning
        if vocab_file is not None:
            self.vocab = self.load_vocab(filepath=vocab_file)
        else:
            self.vocab = {self.sp_model.id_to_piece(id): id for id in range(self.sp_model.get_piece_size())}
        self.reverse_vocab = {v: k for k, v in self.vocab.items()}
    def get_offset_mapping(self, text):
        if text is None:
            return None

        split_tokens = self.tokenize(text)
        normalized_text, char_mapping = "", []

        for i, ch in enumerate(text):
            if ch in self.SP_CHAR_MAPPING:
                ch = self.SP_CHAR_MAPPING.get(ch)
            else:
                ch = unicodedata.normalize("NFKC", ch)
            if self.is_whitespace(ch):
                continue
            normalized_text += ch
            char_mapping.extend([i] * len(ch))

        text, token_mapping, offset = normalized_text, [], 0

        if self.do_lower_case:
            text = text.lower()

        for token in split_tokens:
            if token[:1] == "▁":
                token = token[1:]
            start = text[offset:].index(token) + offset
            end = start + len(token)

            token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1))
            offset = end
        return token_mapping
    @property
    def vocab_size( self ) -> int:
        """simple docstring"""
        return len(self.vocab )
    def get_vocab( self ) -> dict:
        """simple docstring"""
        return dict(self.vocab , **self.added_tokens_encoder )
    def __getstate__( self ):
        """simple docstring"""
        state = self.__dict__.copy()
        state['''sp_model'''] = None
        return state
    def __setstate__( self , d ):
        """simple docstring"""
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , '''sp_model_kwargs''' ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.sentencepiece_model_ckpt )
    def clean_text( self , text ):
        """simple docstring"""
        return "".join((self.SP_CHAR_MAPPING.get(c , c ) for c in text) )
    def _tokenize( self , text , enable_sampling=False , nbest_size=64 , alpha=0.1 ):
        """simple docstring"""
        if self.sp_model_kwargs.get('''enable_sampling''' ) is True:
            enable_sampling = True
        if self.sp_model_kwargs.get('''alpha''' ) is not None:
            alpha = self.sp_model_kwargs.get('''alpha''' )
        if self.sp_model_kwargs.get('''nbest_size''' ) is not None:
            nbest_size = self.sp_model_kwargs.get('''nbest_size''' )
        if not enable_sampling:
            pieces = self.sp_model.EncodeAsPieces(text )
        else:
            pieces = self.sp_model.SampleEncodeAsPieces(text , nbest_size , alpha )
        new_pieces = []
        for pi, piece in enumerate(pieces ):
            if piece == SPIECE_UNDERLINE:
                if not pieces[pi + 1].startswith(SPIECE_UNDERLINE ) and pi != 0:
                    new_pieces.append(SPIECE_UNDERLINE )
                    continue
                else:
                    continue
            lst_i = 0
            for i, chunk in enumerate(piece ):
                if chunk == SPIECE_UNDERLINE:
                    continue
                if self.is_ch_char(chunk ) or self.is_punct(chunk ):
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i] )
                    new_pieces.append(chunk )
                    lst_i = i + 1
                elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit():
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i] )
                    lst_i = i
                elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit():
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i] )
                    lst_i = i
            if len(piece ) > lst_i:
                new_pieces.append(piece[lst_i:] )
        return new_pieces
    def convert_tokens_to_string( self , tokens ):
        """simple docstring"""
        out_string = ''''''.join(tokens ).replace(SPIECE_UNDERLINE , ''' ''' ).strip()
        return out_string
    def convert_ids_to_string( self , ids ):
        """simple docstring"""
        tokens = self.convert_ids_to_tokens(ids )
        out_string = ''''''.join(tokens ).replace(SPIECE_UNDERLINE , ''' ''' ).strip()
        return out_string
    def _convert_token_to_id( self , token ):
        """simple docstring"""
        return self.vocab.get(token , self.vocab.get(self.unk_token ) )
    def _convert_id_to_token( self , index ):
        """simple docstring"""
        return self.reverse_vocab.get(index , self.unk_token )
    def build_inputs_with_special_tokens( self , token_ids_a , token_ids_b=None ):
        """simple docstring"""
        if token_ids_b is None:
            return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        _cls = [self.cls_token_id]
        _sep = [self.sep_token_id]
        return _cls + token_ids_a + _sep + _sep + token_ids_b + _sep
    def build_offset_mapping_with_special_tokens( self , offset_mapping_a , offset_mapping_b=None ):
        """simple docstring"""
        if offset_mapping_b is None:
            return [(0, 0)] + offset_mapping_a + [(0, 0)]
        return [(0, 0)] + offset_mapping_a + [(0, 0), (0, 0)] + offset_mapping_b + [(0, 0)]
    def get_special_tokens_mask( self , token_ids_a , token_ids_b=None , already_has_special_tokens=False ):
        """simple docstring"""
        if already_has_special_tokens:
            if token_ids_b is not None:
                raise ValueError(
                    '''You should not supply a second sequence if the provided sequence of '''
                    '''ids is already formatted with special tokens for the model.''' )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
        if token_ids_b is not None:
            return [1] + ([0] * len(token_ids_a )) + [1, 1] + ([0] * len(token_ids_b )) + [1]
        return [1] + ([0] * len(token_ids_a )) + [1]
    def create_token_type_ids_from_sequences( self , token_ids_a: List[int] , token_ids_b: Optional[List[int]] = None ) -> List[int]:
        """simple docstring"""
        # called when `add_special_tokens` is True, so align with `build_inputs_with_special_tokens` method
        if token_ids_b is None:
            # [CLS] X [SEP]
            return (len(token_ids_a ) + 2) * [0]
        # [CLS] A [SEP] [SEP] B [SEP]
        return [0] * (len(token_ids_a ) + 1) + [1] * (len(token_ids_b ) + 3)
    def is_ch_char( self , char ):
        """simple docstring"""
        if "\u4e00" <= char <= "\u9fff":
            return True
        return False
    def is_alpha( self , char ):
        """simple docstring"""
        if ("a" <= char <= "z") or ("A" <= char <= "Z"):
            return True
        return False
    def is_punct( self , char ):
        """simple docstring"""
        if char in ",;:.?!~,;:。?!《》【】":
            return True
        return False
    def is_whitespace( self , char ):
        """simple docstring"""
        if char == " " or char == "\t" or char == "\n" or char == "\r":
            return True
        if len(char ) == 1:
            cat = unicodedata.category(char )
            if cat == "Zs":
                return True
        return False
    def load_vocab( self , filepath ):
        """simple docstring"""
        token_to_idx = {}
        with io.open(filepath , '''r''' , encoding='''utf-8''' ) as f:
            for index, line in enumerate(f ):
                token = line.rstrip('''\n''' )
                token_to_idx[token] = int(index )
        return token_to_idx
    def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None ) -> Tuple[str]:
        """simple docstring"""
        index = 0
        if os.path.isdir(save_directory ):
            vocab_file = os.path.join(
                save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        else:
            vocab_file = (filename_prefix + '''-''' if filename_prefix else '''''') + save_directory
        with open(vocab_file , '''w''' , encoding='''utf-8''' ) as writer:
            for token, token_index in sorted(self.vocab.items() , key=lambda kv : kv[1] ):
                if index != token_index:
                    logger.warning(
                        F"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."""
                        ''' Please check that the vocabulary is not corrupted!''' )
                    index = token_index
                writer.write(token + '''\n''' )
                index += 1
        sentencepiece_model_file = os.path.join(save_directory , '''sentencepiece.bpe.model''' )
        with open(sentencepiece_model_file , '''wb''' ) as fi:
            content_spiece_model = self.sp_model.serialized_model_proto()
            fi.write(content_spiece_model )
        return (vocab_file,)
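# A minimal usage sketch for the tokenizer class above (hedged: the checkpoint and
# vocab file names are illustrative placeholders, not files shipped with this snippet):
#
#   tokenizer = __a(sentencepiece_model_ckpt="sentencepiece.bpe.model", vocab_file="vocab.txt")
#   pieces = tokenizer._tokenize("Hello world")               # SentencePiece pieces
#   ids = [tokenizer._convert_token_to_id(p) for p in pieces]
#   ids = tokenizer.build_inputs_with_special_tokens(ids)     # [CLS] ... [SEP]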
"""simple docstring"""
import os
import sys
SRC_DIR = os.path.join(os.path.dirname(__file__), 'src')
sys.path.append(SRC_DIR)
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoTokenizer,
add_start_docstrings,
)
dependencies = [
'torch',
'numpy',
'tokenizers',
'filelock',
'requests',
'tqdm',
'regex',
'sentencepiece',
'sacremoses',
'importlib_metadata',
'huggingface_hub',
]
@add_start_docstrings(AutoConfig.__doc__ )
def config( *args , **kwargs ):
    '''simple docstring'''
    return AutoConfig.from_pretrained(*args , **kwargs )
@add_start_docstrings(AutoTokenizer.__doc__ )
def tokenizer( *args , **kwargs ):
    '''simple docstring'''
    return AutoTokenizer.from_pretrained(*args , **kwargs )
@add_start_docstrings(AutoModel.__doc__ )
def model( *args , **kwargs ):
    '''simple docstring'''
    return AutoModel.from_pretrained(*args , **kwargs )
@add_start_docstrings(AutoModelForCausalLM.__doc__ )
def modelForCausalLM( *args , **kwargs ):
    '''simple docstring'''
    return AutoModelForCausalLM.from_pretrained(*args , **kwargs )
@add_start_docstrings(AutoModelForMaskedLM.__doc__ )
def modelForMaskedLM( *args , **kwargs ):
    '''simple docstring'''
    return AutoModelForMaskedLM.from_pretrained(*args , **kwargs )
@add_start_docstrings(AutoModelForSequenceClassification.__doc__ )
def modelForSequenceClassification( *args , **kwargs ):
    '''simple docstring'''
    return AutoModelForSequenceClassification.from_pretrained(*args , **kwargs )
@add_start_docstrings(AutoModelForQuestionAnswering.__doc__ )
def modelForQuestionAnswering( *args , **kwargs ):
    '''simple docstring'''
    return AutoModelForQuestionAnswering.from_pretrained(*args , **kwargs )
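# Usage sketch for the hub shortcuts above (repo/model names are illustrative):
#
#   import torch
#   cfg = torch.hub.load('huggingface/transformers', 'config', 'bert-base-uncased')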
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
"google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/config.json",
"google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/config.json"
# See all FNet models at https://huggingface.co/models?filter=fnet
}
class SCREAMING_SNAKE_CASE__ ( PretrainedConfig ):
    model_type = "fnet"
    def __init__(self , vocab_size=32000 , hidden_size=768 , num_hidden_layers=12 , intermediate_size=3072 , hidden_act="gelu_new" , hidden_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=4 , initializer_range=0.02 , layer_norm_eps=1e-12 , use_tpu_fourier_optimizations=False , tpu_short_seq_length=512 , pad_token_id=3 , bos_token_id=1 , eos_token_id=2 , **kwargs , ):
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_tpu_fourier_optimizations = use_tpu_fourier_optimizations
        self.tpu_short_seq_length = tpu_short_seq_length
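# Construction sketch, for reference: the defaults above describe the base-sized model,
# so `SCREAMING_SNAKE_CASE__()` yields hidden_size=768, num_hidden_layers=12 and a
# 32000-token vocabulary; any field can be overridden by keyword argument.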
import argparse
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.'
)
parser.add_argument(
'--original_config_file',
type=str,
required=True,
help='The YAML config file corresponding to the original architecture.',
)
parser.add_argument(
'--num_in_channels',
default=None,
type=int,
help='The number of input channels. If `None` number of input channels will be automatically inferred.',
)
parser.add_argument(
'--image_size',
default=5_12,
type=int,
help=(
            'The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Diffusion v2'
' Base. Use 768 for Stable Diffusion v2.'
),
)
parser.add_argument(
'--extract_ema',
action='store_true',
help=(
'Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights'
' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield'
' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.'
),
)
parser.add_argument(
'--upcast_attention',
action='store_true',
help=(
'Whether the attention computation should always be upcasted. This is necessary when running stable'
' diffusion 2.1.'
),
)
parser.add_argument(
'--from_safetensors',
action='store_true',
help='If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.',
)
parser.add_argument(
'--to_safetensors',
action='store_true',
help='Whether to store pipeline in safetensors format or not.',
)
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
parser.add_argument('--device', type=str, help='Device to use (e.g. cpu, cuda:0, cuda:1, etc.)')
    def parse_bool( string ):
        """simple docstring"""
        if string == "True":
            return True
        elif string == "False":
            return False
        else:
            raise ValueError(f'could not parse string as bool {string}' )
parser.add_argument(
'--use_linear_projection', help='Override for use linear projection', required=False, type=parse_bool
)
parser.add_argument('--cross_attention_dim', help='Override for cross attention_dim', required=False, type=int)
    args = parser.parse_args()
    controlnet = download_controlnet_from_original_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
extract_ema=args.extract_ema,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
use_linear_projection=args.use_linear_projection,
cross_attention_dim=args.cross_attention_dim,
)
controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
from sklearn.metrics import f1_score, matthews_corrcoef
import datasets
from .record_evaluation import evaluate as evaluate_record
__A : Optional[Any] = '\\n@article{wang2019superglue,\n title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},\n author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},\n journal={arXiv preprint arXiv:1905.00537},\n year={2019}\n}\n'
__A : Tuple = '\\nSuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after\nGLUE with a new set of more difficult language understanding tasks, improved\nresources, and a new public leaderboard.\n'
__A : str = '\nCompute SuperGLUE evaluation metric associated to each SuperGLUE dataset.\nArgs:\n predictions: list of predictions to score. Depending on the SuperGlUE subset:\n - for \'record\': list of question-answer dictionaries with the following keys:\n - \'idx\': index of the question as specified by the dataset\n - \'prediction_text\': the predicted answer text\n - for \'multirc\': list of question-answer dictionaries with the following keys:\n - \'idx\': index of the question-answer pair as specified by the dataset\n - \'prediction\': the predicted answer label\n - otherwise: list of predicted labels\n references: list of reference labels. Depending on the SuperGLUE subset:\n - for \'record\': list of question-answers dictionaries with the following keys:\n - \'idx\': index of the question as specified by the dataset\n - \'answers\': list of possible answers\n - otherwise: list of reference labels\nReturns: depending on the SuperGLUE subset:\n - for \'record\':\n - \'exact_match\': Exact match between answer and gold answer\n - \'f1\': F1 score\n - for \'multirc\':\n - \'exact_match\': Exact match between answer and gold answer\n - \'f1_m\': Per-question macro-F1 score\n - \'f1_a\': Average F1 score over all answers\n - for \'axb\':\n \'matthews_correlation\': Matthew Correlation\n - for \'cb\':\n - \'accuracy\': Accuracy\n - \'f1\': F1 score\n - for all others:\n - \'accuracy\': Accuracy\nExamples:\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'copa\') # any of ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0}\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'cb\')\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0, \'f1\': 1.0}\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'record\')\n >>> predictions = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'prediction_text\': \'answer\'}]\n >>> references = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'answers\': [\'answer\', \'another_answer\']}]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'exact_match\': 1.0, \'f1\': 1.0}\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'multirc\')\n >>> predictions = [{\'idx\': {\'answer\': 0, \'paragraph\': 0, \'question\': 0}, \'prediction\': 0}, {\'idx\': {\'answer\': 1, \'paragraph\': 2, \'question\': 3}, \'prediction\': 1}]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'exact_match\': 1.0, \'f1_m\': 1.0, \'f1_a\': 1.0}\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'axb\')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'matthews_correlation\': 1.0}\n'
def simple_accuracy( preds , labels ):
    """simple docstring"""
    return float((preds == labels).mean() )
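# e.g. simple_accuracy(np.array([0, 1, 1]), np.array([0, 1, 0])) -> ~0.667
# (inputs need element-wise `==` and `.mean()`, e.g. numpy arrays)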
def acc_and_fa( preds , labels , fa_avg="binary" ):
    """simple docstring"""
    acc = simple_accuracy(preds , labels )
    fa = float(f1_score(y_true=labels , y_pred=preds , average=fa_avg ) )
    return {
        "accuracy": acc,
        "f1": fa,
    }
def evaluate_multirc( ids_preds , labels ):
    """simple docstring"""
    question_map = {}
    for id_pred, label in zip(ids_preds , labels ):
        question_id = f'{id_pred["idx"]["paragraph"]}-{id_pred["idx"]["question"]}'
        pred = id_pred["""prediction"""]
        if question_id in question_map:
            question_map[question_id].append((pred, label) )
        else:
            question_map[question_id] = [(pred, label)]
    fas , ems = [], []
    for question, preds_labels in question_map.items():
        question_preds , question_labels = zip(*preds_labels )
        fa = f1_score(y_true=question_labels , y_pred=question_preds , average="""macro""" )
        fas.append(fa )
        em = int(sum(pred == label for pred, label in preds_labels ) == len(preds_labels ) )
        ems.append(em )
    fa_m = float(sum(fas ) / len(fas ) )
    em = sum(ems ) / len(ems )
    fa_a = float(f1_score(y_true=labels , y_pred=[id_pred["""prediction"""] for id_pred in ids_preds] ) )
    return {"exact_match": em, "f1_m": fa_m, "f1_a": fa_a}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class _SCREAMING_SNAKE_CASE ( datasets.Metric):
    def _info( self )-> Union[str, Any]:
if self.config_name not in [
"boolq",
"cb",
"copa",
"multirc",
"record",
"rte",
"wic",
"wsc",
"wsc.fixed",
"axb",
"axg",
]:
raise KeyError(
"""You should supply a configuration name selected in """
"""[\"boolq\", \"cb\", \"copa\", \"multirc\", \"record\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"axb\", \"axg\",]""" )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , codebase_urls=[] , reference_urls=[] , format="""numpy""" if not self.config_name == """record""" and not self.config_name == """multirc""" else None , )
    def _get_feature_types( self )-> Optional[Any]:
if self.config_name == "record":
return {
"predictions": {
"idx": {
"passage": datasets.Value("""int64""" ),
"query": datasets.Value("""int64""" ),
},
"prediction_text": datasets.Value("""string""" ),
},
"references": {
"idx": {
"passage": datasets.Value("""int64""" ),
"query": datasets.Value("""int64""" ),
},
"answers": datasets.Sequence(datasets.Value("""string""" ) ),
},
}
elif self.config_name == "multirc":
return {
"predictions": {
"idx": {
"answer": datasets.Value("""int64""" ),
"paragraph": datasets.Value("""int64""" ),
"question": datasets.Value("""int64""" ),
},
"prediction": datasets.Value("""int64""" ),
},
"references": datasets.Value("""int64""" ),
}
else:
return {
"predictions": datasets.Value("""int64""" ),
"references": datasets.Value("""int64""" ),
}
    def _compute( self , predictions , references )-> Optional[int]:
        if self.config_name == "axb":
            return {"matthews_correlation": matthews_corrcoef(references , predictions )}
        elif self.config_name == "cb":
            return acc_and_fa(predictions , references , fa_avg="""macro""" )
        elif self.config_name == "record":
            true_dataset = [
                {
                    """qas""": [
                        {"""id""": ref["""idx"""]["""query"""], """answers""": [{"""text""": ans} for ans in ref["""answers"""]]}
                        for ref in references
                    ]
                }
            ]
            predicted_dict = {pred["""idx"""]["""query"""]: pred["""prediction_text"""] for pred in predictions}
            return evaluate_record(true_dataset , predicted_dict )[0]
        elif self.config_name == "multirc":
            return evaluate_multirc(predictions , references )
        elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
            return {"accuracy": simple_accuracy(predictions , references )}
        else:
            raise KeyError(
                """You should supply a configuration name selected in """
                """[\"boolq\", \"cb\", \"copa\", \"multirc\", \"record\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"axb\", \"axg\",]""" )
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class UpperCamelCase ( __a ):
def A_ (self ) -> Any:
UpperCamelCase_ : Any = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(__UpperCamelCase , """tf_padding""" ) )
self.parent.assertTrue(hasattr(__UpperCamelCase , """depth_multiplier""" ) )
class UpperCamelCase :
def __init__(self , __UpperCamelCase , __UpperCamelCase=13 , __UpperCamelCase=3 , __UpperCamelCase=32 , __UpperCamelCase=0.25 , __UpperCamelCase=8 , __UpperCamelCase=8 , __UpperCamelCase=6 , __UpperCamelCase=32 , __UpperCamelCase=True , __UpperCamelCase=True , __UpperCamelCase=True , __UpperCamelCase="relu6" , __UpperCamelCase=1_280 , __UpperCamelCase=0.1 , __UpperCamelCase=0.02 , __UpperCamelCase=True , __UpperCamelCase=True , __UpperCamelCase=10 , __UpperCamelCase=None , ) -> Optional[int]:
UpperCamelCase_ : Union[str, Any] = parent
UpperCamelCase_ : List[str] = batch_size
UpperCamelCase_ : int = num_channels
UpperCamelCase_ : Any = image_size
UpperCamelCase_ : Any = depth_multiplier
UpperCamelCase_ : str = depth_divisible_by
UpperCamelCase_ : List[str] = min_depth
UpperCamelCase_ : List[str] = expand_ratio
UpperCamelCase_ : Optional[int] = tf_padding
UpperCamelCase_ : int = output_stride
UpperCamelCase_ : Union[str, Any] = first_layer_is_expansion
UpperCamelCase_ : Any = finegrained_output
UpperCamelCase_ : Tuple = hidden_act
UpperCamelCase_ : Dict = last_hidden_size if finegrained_output else int(last_hidden_size * depth_multiplier )
UpperCamelCase_ : Tuple = classifier_dropout_prob
UpperCamelCase_ : Union[str, Any] = use_labels
UpperCamelCase_ : Union[str, Any] = is_training
UpperCamelCase_ : int = num_labels
UpperCamelCase_ : Tuple = initializer_range
UpperCamelCase_ : List[Any] = scope
def A_ (self ) -> Union[str, Any]:
UpperCamelCase_ : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCamelCase_ : Optional[Any] = None
UpperCamelCase_ : Optional[Any] = None
if self.use_labels:
UpperCamelCase_ : List[Any] = ids_tensor([self.batch_size] , self.num_labels )
UpperCamelCase_ : str = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
UpperCamelCase_ : Any = self.get_config()
return config, pixel_values, labels, pixel_labels
def A_ (self ) -> List[str]:
return MobileNetVaConfig(
num_channels=self.num_channels , image_size=self.image_size , depth_multiplier=self.depth_multiplier , depth_divisible_by=self.depth_divisible_by , min_depth=self.min_depth , expand_ratio=self.expand_ratio , output_stride=self.output_stride , first_layer_is_expansion=self.first_layer_is_expansion , finegrained_output=self.finegrained_output , hidden_act=self.hidden_act , tf_padding=self.tf_padding , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
def A_ (self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> List[Any]:
UpperCamelCase_ : Optional[Any] = MobileNetVaModel(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
UpperCamelCase_ : Union[str, Any] = model(__UpperCamelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
self.parent.assertEqual(
result.pooler_output.shape , (self.batch_size, self.last_hidden_size) , )
def A_ (self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> Union[str, Any]:
UpperCamelCase_ : Tuple = self.num_labels
UpperCamelCase_ : Tuple = MobileNetVaForImageClassification(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
UpperCamelCase_ : Dict = model(__UpperCamelCase , labels=__UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def A_ (self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> int:
UpperCamelCase_ : Dict = self.num_labels
UpperCamelCase_ : List[Any] = MobileNetVaForSemanticSegmentation(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
UpperCamelCase_ : Dict = model(__UpperCamelCase )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
UpperCamelCase_ : List[str] = model(__UpperCamelCase , labels=__UpperCamelCase )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
    def prepare_config_and_inputs_for_common(self ) -> Any:
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels , pixel_labels = config_and_inputs
        inputs_dict = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_torch
class UpperCamelCase ( __a , __a , unittest.TestCase ):
a__ :Tuple = (
(MobileNetVaModel, MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation)
if is_torch_available()
else ()
)
a__ :Tuple = (
{
'''feature-extraction''': MobileNetVaModel,
'''image-classification''': MobileNetVaForImageClassification,
'''image-segmentation''': MobileNetVaForSemanticSegmentation,
}
if is_torch_available()
else {}
)
a__ :Union[str, Any] = False
a__ :List[str] = False
a__ :Dict = False
a__ :List[str] = False
def A_ (self ) -> List[Any]:
UpperCamelCase_ : List[Any] = MobileNetVaModelTester(self )
UpperCamelCase_ : List[Any] = MobileNetVaConfigTester(self , config_class=__UpperCamelCase , has_text_modality=__UpperCamelCase )
def A_ (self ) -> Optional[Any]:
self.config_tester.run_common_tests()
@unittest.skip(reason="""MobileNetV2 does not use inputs_embeds""" )
def A_ (self ) -> Any:
pass
@unittest.skip(reason="""MobileNetV2 does not support input and output embeddings""" )
def A_ (self ) -> int:
pass
@unittest.skip(reason="""MobileNetV2 does not output attentions""" )
def A_ (self ) -> Any:
pass
def A_ (self ) -> Tuple:
UpperCamelCase_,UpperCamelCase_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase_ : str = model_class(__UpperCamelCase )
UpperCamelCase_ : str = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase_ : List[str] = [*signature.parameters.keys()]
UpperCamelCase_ : List[str] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __UpperCamelCase )
def A_ (self ) -> List[str]:
UpperCamelCase_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCamelCase )
def A_ (self ) -> Dict:
def check_hidden_states_output(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
UpperCamelCase_ : Dict = model_class(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
with torch.no_grad():
UpperCamelCase_ : Optional[int] = model(**self._prepare_for_class(__UpperCamelCase , __UpperCamelCase ) )
UpperCamelCase_ : Optional[int] = outputs.hidden_states
UpperCamelCase_ : Union[str, Any] = 16
self.assertEqual(len(__UpperCamelCase ) , __UpperCamelCase )
UpperCamelCase_,UpperCamelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase_ : int = True
check_hidden_states_output(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCamelCase_ : Optional[int] = True
check_hidden_states_output(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
def A_ (self ) -> Optional[Any]:
UpperCamelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__UpperCamelCase )
def A_ (self ) -> Any:
UpperCamelCase_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*__UpperCamelCase )
@slow
def A_ (self ) -> str:
for model_name in MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase_ : List[str] = MobileNetVaModel.from_pretrained(__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
def lowerCAmelCase_ ( ):
UpperCamelCase_ : Union[str, Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class UpperCamelCase ( unittest.TestCase ):
@cached_property
def A_ (self ) -> List[str]:
return (
MobileNetVaImageProcessor.from_pretrained("""google/mobilenet_v2_1.0_224""" ) if is_vision_available() else None
)
@slow
def A_ (self ) -> Optional[int]:
UpperCamelCase_ : Union[str, Any] = MobileNetVaForImageClassification.from_pretrained("""google/mobilenet_v2_1.0_224""" ).to(__UpperCamelCase )
UpperCamelCase_ : int = self.default_image_processor
UpperCamelCase_ : Optional[int] = prepare_img()
UpperCamelCase_ : str = image_processor(images=__UpperCamelCase , return_tensors="""pt""" ).to(__UpperCamelCase )
# forward pass
with torch.no_grad():
UpperCamelCase_ : str = model(**__UpperCamelCase )
# verify the logits
UpperCamelCase_ : Union[str, Any] = torch.Size((1, 1_001) )
self.assertEqual(outputs.logits.shape , __UpperCamelCase )
UpperCamelCase_ : Union[str, Any] = torch.tensor([0.2_445, -1.1_993, 0.1_905] ).to(__UpperCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __UpperCamelCase , atol=1E-4 ) )
@slow
def A_ (self ) -> Union[str, Any]:
UpperCamelCase_ : Tuple = MobileNetVaForSemanticSegmentation.from_pretrained("""google/deeplabv3_mobilenet_v2_1.0_513""" )
UpperCamelCase_ : Union[str, Any] = model.to(__UpperCamelCase )
UpperCamelCase_ : Any = MobileNetVaImageProcessor.from_pretrained("""google/deeplabv3_mobilenet_v2_1.0_513""" )
UpperCamelCase_ : str = prepare_img()
UpperCamelCase_ : Union[str, Any] = image_processor(images=__UpperCamelCase , return_tensors="""pt""" ).to(__UpperCamelCase )
# forward pass
with torch.no_grad():
UpperCamelCase_ : List[str] = model(**__UpperCamelCase )
UpperCamelCase_ : Tuple = outputs.logits
# verify the logits
UpperCamelCase_ : Any = torch.Size((1, 21, 65, 65) )
self.assertEqual(logits.shape , __UpperCamelCase )
UpperCamelCase_ : Union[str, Any] = torch.tensor(
[
[[17.5_790, 17.7_581, 18.3_355], [18.3_257, 18.4_230, 18.8_973], [18.6_169, 18.8_650, 19.2_187]],
[[-2.1_595, -2.0_977, -2.3_741], [-2.4_226, -2.3_028, -2.6_835], [-2.7_819, -2.5_991, -2.7_706]],
[[4.2_058, 4.8_317, 4.7_638], [4.4_136, 5.0_361, 4.9_383], [4.5_028, 4.9_644, 4.8_734]],
] , device=__UpperCamelCase , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , __UpperCamelCase , atol=1E-4 ) )
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional
from packaging import version
if TYPE_CHECKING:
from ... import PreTrainedTokenizer, TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import is_torch_available, logging
SCREAMING_SNAKE_CASE : List[str] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE : str = {
"bigscience/bloom": "https://huggingface.co/bigscience/bloom/resolve/main/config.json",
"bigscience/bloom-560m": "https://huggingface.co/bigscience/bloom-560m/blob/main/config.json",
"bigscience/bloom-1b1": "https://huggingface.co/bigscience/bloom-1b1/blob/main/config.json",
"bigscience/bloom-1b7": "https://huggingface.co/bigscience/bloom-1b7/blob/main/config.json",
"bigscience/bloom-3b": "https://huggingface.co/bigscience/bloom-3b/blob/main/config.json",
"bigscience/bloom-7b1": "https://huggingface.co/bigscience/bloom-7b1/blob/main/config.json",
}
class UpperCamelCase ( PretrainedConfig ):
    model_type = '''bloom'''
    keys_to_ignore_at_inference = ['''past_key_values''']
    attribute_map = {
        '''num_hidden_layers''': '''n_layer''',
        '''num_attention_heads''': '''n_head''',
    }
    def __init__(self , vocab_size=250_880 , hidden_size=64 , n_layer=2 , n_head=8 , layer_norm_epsilon=1E-5 , initializer_range=0.02 , use_cache=True , bos_token_id=1 , eos_token_id=2 , apply_residual_connection_post_layernorm=False , hidden_dropout=0.0 , attention_dropout=0.0 , pretraining_tp=1 , slow_but_exact=False , **kwargs , ) -> None:
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop("""n_embed""" , None )
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.n_layer = n_layer
        self.n_head = n_head
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.pretraining_tp = pretraining_tp
        self.apply_residual_connection_post_layernorm = apply_residual_connection_post_layernorm
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.slow_but_exact = slow_but_exact
        super().__init__(bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
class UpperCamelCase ( OnnxConfigWithPast ):
    torch_onnx_minimum_version = version.parse('''1.12''' )
    def __init__(self , config , task = "default" , patching_specs = None , use_past = False , ) -> None:
        super().__init__(config , task=task , patching_specs=patching_specs , use_past=use_past )
        if not getattr(self._config , """pad_token_id""" , None ):
            # TODO: how to do that better?
            self._config.pad_token_id = 0
    @property
    def inputs(self ) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"""input_ids""": {0: """batch""", 1: """sequence"""}} )
        if self.use_past:
            # BLOOM stores values on dynamic axis 2. For more details see: https://github.com/huggingface/transformers/pull/18344
            self.fill_with_past_key_values_(common_inputs , direction="""inputs""" , inverted_values_shape=True )
            common_inputs["""attention_mask"""] = {0: """batch""", 1: """past_sequence + sequence"""}
        else:
            common_inputs["""attention_mask"""] = {0: """batch""", 1: """sequence"""}
        return common_inputs
    @property
    def num_layers(self ) -> int:
        return self._config.n_layer
    @property
    def num_attention_heads(self ) -> int:
        return self._config.n_head
    @property
    def atol_for_validation(self ) -> float:
        return 1E-3
    def generate_dummy_inputs(self , tokenizer , batch_size = -1 , seq_length = -1 , is_pair = False , framework = None , ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast , self ).generate_dummy_inputs(
            tokenizer , batch_size=batch_size , seq_length=seq_length , is_pair=is_pair , framework=framework )
        # We need to order the inputs in the way they appear in forward()
        ordered_inputs = OrderedDict({"""input_ids""": common_inputs["""input_ids"""]} )
        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
            else:
                import torch
                batch , seqlen = common_inputs["""input_ids"""].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                head_dim = self._config.hidden_size // self.num_attention_heads
                past_key_shape = (
                    batch * self.num_attention_heads,
                    head_dim,
                    past_key_values_length,
                )
                past_value_shape = (
                    batch * self.num_attention_heads,
                    past_key_values_length,
                    head_dim,
                )
                ordered_inputs["""past_key_values"""] = [
                    (torch.zeros(past_key_shape ), torch.zeros(past_value_shape )) for _ in range(self.num_layers )
                ]
        ordered_inputs["""attention_mask"""] = common_inputs["""attention_mask"""]
        if self.use_past:
            mask_dtype = ordered_inputs["""attention_mask"""].dtype
            ordered_inputs["""attention_mask"""] = torch.cat(
                [ordered_inputs["""attention_mask"""], torch.ones(batch , past_key_values_length , dtype=mask_dtype )] , dim=1 )
        return ordered_inputs
    @property
    def default_onnx_opset(self ) -> int:
        return 13
def solution( n = 4000000 ):
    '''simple docstring'''
    even_fibs = []
    a , b = 0, 1
    while b <= n:
        if b % 2 == 0:
            even_fibs.append(b )
        a , b = b, a + b
    return sum(even_fibs )
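# e.g. solution(10) -> 10, since the even Fibonacci numbers not exceeding 10 are 2 and 8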
if __name__ == "__main__":
print(f'{solution() = }')
# Lint as: python3
import dataclasses
import re
from dataclasses import dataclass
from functools import total_ordering
from typing import Optional, Union
_VERSION_REG = re.compile(r"""^(?P<major>\d+)""" r"""\.(?P<minor>\d+)""" r"""\.(?P<patch>\d+)$""")
@total_ordering
@dataclass
class Version :
    version_str : str
    description : Optional[str] = None
    major : Optional[Union[str, int]] = None
    minor : Optional[Union[str, int]] = None
    patch : Optional[Union[str, int]] = None
    def __post_init__( self ):
        """simple docstring"""
        self.major , self.minor , self.patch = _str_to_version_tuple(self.version_str )
    def __repr__( self ):
        """simple docstring"""
        return F"{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}"
    @property
    def tuple( self ):
        """simple docstring"""
        return self.major, self.minor, self.patch
    def _validate_operand( self , other ):
        """simple docstring"""
        if isinstance(other , str ):
            return Version(other )
        elif isinstance(other , Version ):
            return other
        raise TypeError(F"{other} (type {type(other )}) cannot be compared to version." )
def __eq__( self : List[Any] , lowerCamelCase__ : int ) -> Optional[Any]:
"""simple docstring"""
try:
            other = self._validate_operand(lowerCamelCase__ )
except (TypeError, ValueError):
return False
else:
return self.tuple == other.tuple
def __lt__( self : Optional[int] , lowerCamelCase__ : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
        other = self._validate_operand(lowerCamelCase__ )
return self.tuple < other.tuple
def __hash__( self : List[str] ) -> List[str]:
"""simple docstring"""
return hash(_version_tuple_to_str(self.tuple ) )
    @classmethod
    def from_dict( cls , dic ):
        """simple docstring"""
        field_names = {f.name for f in dataclasses.fields(cls )}
        return cls(**{k: v for k, v in dic.items() if k in field_names} )
    def _to_yaml_string( self ) -> str:
        """simple docstring"""
        return self.version_str
def _str_to_version_tuple( version_str ):
    '''simple docstring'''
    res = _VERSION_REG.match(version_str )
    if not res:
        raise ValueError(f"Invalid version '{version_str}'. Format should be x.y.z with {{x,y,z}} being digits." )
    return tuple(int(v ) for v in [res.group('''major''' ), res.group('''minor''' ), res.group('''patch''' )] )
def _version_tuple_to_str( version_tuple ):
    '''simple docstring'''
    return ".".join(str(v ) for v in version_tuple )
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
_a = logging.get_logger(__name__)
def make_batched( videos ) -> List[List[ImageInput]]:
    """simple docstring"""
    if isinstance(videos, (list, tuple) ) and isinstance(videos[0], (list, tuple) ) and is_valid_image(videos[0][0] ):
        return videos
    elif isinstance(videos, (list, tuple) ) and is_valid_image(videos[0] ):
        return [videos]
    elif is_valid_image(videos ):
        return [[videos]]
    raise ValueError(F'''Could not make batched video from {videos}''' )
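# For reference, `make_batched` normalizes its input to a list of videos (each a list
# of frames): a single image becomes [[image]], a single video becomes [video].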
class _UpperCAmelCase( BaseImageProcessor ):
    model_input_names = ['pixel_values']
    def __init__( self , do_resize = True , size = None , resample = PILImageResampling.BILINEAR , do_center_crop = True , crop_size = None , do_rescale = True , rescale_factor = 1 / 2_55 , do_normalize = True , image_mean = None , image_std = None , **kwargs , ) -> None:
        '''simple docstring'''
        super().__init__(**kwargs)
        size = size if size is not None else {'''shortest_edge''': 2_24}
        size = get_size_dict(size , default_to_square=False)
        crop_size = crop_size if crop_size is not None else {'''height''': 2_24, '''width''': 2_24}
        crop_size = get_size_dict(crop_size , param_name='''crop_size''')
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
def UpperCAmelCase ( self , __a , __a , __a = PILImageResampling.BILINEAR , __a = None , **__a , ) -> np.ndarray:
'''simple docstring'''
_UpperCamelCase = get_size_dict(__a , default_to_square=__a)
if "shortest_edge" in size:
_UpperCamelCase = get_resize_output_image_size(__a , size['''shortest_edge'''] , default_to_square=__a)
elif "height" in size and "width" in size:
_UpperCamelCase = (size['''height'''], size['''width'''])
else:
raise ValueError(F'''Size must have \'height\' and \'width\' or \'shortest_edge\' as keys. Got {size.keys()}''')
return resize(__a , size=__a , resample=__a , data_format=__a , **__a)
def UpperCAmelCase ( self , __a , __a , __a = None , **__a , ) -> np.ndarray:
'''simple docstring'''
_UpperCamelCase = get_size_dict(__a)
if "height" not in size or "width" not in size:
raise ValueError(F'''Size must have \'height\' and \'width\' as keys. Got {size.keys()}''')
return center_crop(__a , size=(size['''height'''], size['''width''']) , data_format=__a , **__a)
def UpperCAmelCase ( self , __a , __a , __a = None , **__a , ) -> Dict:
'''simple docstring'''
return rescale(__a , scale=__a , data_format=__a , **__a)
def UpperCAmelCase ( self , __a , __a , __a , __a = None , **__a , ) -> np.ndarray:
'''simple docstring'''
return normalize(__a , mean=__a , std=__a , data_format=__a , **__a)
def UpperCAmelCase ( self , __a , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = ChannelDimension.FIRST , ) -> np.ndarray:
'''simple docstring'''
        if do_resize and (size is None or resample is None):
            raise ValueError('''Size and resample must be specified if do_resize is True.''')
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''')
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''')
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''')
# All transformations expect numpy arrays.
_UpperCamelCase = to_numpy_array(__a)
if do_resize:
_UpperCamelCase = self.resize(image=__a , size=__a , resample=__a)
if do_center_crop:
_UpperCamelCase = self.center_crop(__a , size=__a)
if do_rescale:
_UpperCamelCase = self.rescale(image=__a , scale=__a)
if do_normalize:
_UpperCamelCase = self.normalize(image=__a , mean=__a , std=__a)
_UpperCamelCase = to_channel_dimension_format(__a , __a)
return image
def UpperCAmelCase ( self , __a , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = ChannelDimension.FIRST , **__a , ) -> PIL.Image.Image:
'''simple docstring'''
_UpperCamelCase = do_resize if do_resize is not None else self.do_resize
_UpperCamelCase = resample if resample is not None else self.resample
_UpperCamelCase = do_center_crop if do_center_crop is not None else self.do_center_crop
_UpperCamelCase = do_rescale if do_rescale is not None else self.do_rescale
_UpperCamelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
_UpperCamelCase = do_normalize if do_normalize is not None else self.do_normalize
_UpperCamelCase = image_mean if image_mean is not None else self.image_mean
_UpperCamelCase = image_std if image_std is not None else self.image_std
_UpperCamelCase = size if size is not None else self.size
_UpperCamelCase = get_size_dict(__a , default_to_square=__a)
_UpperCamelCase = crop_size if crop_size is not None else self.crop_size
_UpperCamelCase = get_size_dict(__a , param_name='''crop_size''')
if not valid_images(__a):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''')
_UpperCamelCase = make_batched(__a)
_UpperCamelCase = [
[
self._preprocess_image(
image=__a , do_resize=__a , size=__a , resample=__a , do_center_crop=__a , crop_size=__a , do_rescale=__a , rescale_factor=__a , do_normalize=__a , image_mean=__a , image_std=__a , data_format=__a , )
for img in video
]
for video in videos
]
_UpperCamelCase = {'''pixel_values''': videos}
return BatchFeature(data=__a , tensor_type=__a)
'''simple docstring'''
import argparse
import hashlib # hashlib is only used inside the Test class
import struct
class snake_case :
"""simple docstring"""
    def __init__( self, _lowercase ) -> None:
        self.data = _lowercase
        self.h = [0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476, 0xC3D2E1F0]
@staticmethod
    def rotate( n, b ) -> int:
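        # 32-bit circular left shift of n by b bits, masked back to 32 bits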
        return ((n << b) | (n >> (32 - b))) & 0xFFFFFFFF
    def padding( self ) -> bytes:
        padding = b'\x80' + b'\x00' * (63 - (len(self.data ) + 8) % 64)
        padded_data = self.data + padding + struct.pack('>Q', 8 * len(self.data ) )
        return padded_data
    def split_blocks( self ) -> list:
        return [
            self.padded_data[i : i + 64] for i in range(0, len(self.padded_data ), 64 )
        ]
    def expand_block( self, _lowercase ) -> list:
        w = list(struct.unpack('>16L', _lowercase ) ) + [0] * 64
        for i in range(16, 80 ):
            w[i] = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]), 1 )
        return w
    def final_hash( self ) -> str:
        self.padded_data = self.padding()
        self.blocks = self.split_blocks()
        for block in self.blocks:
            expanded_block = self.expand_block(block )
            a , b , c , d , e = self.h
            for i in range(0, 80 ):
                if 0 <= i < 20:
                    f = (b & c) | ((~b) & d)
                    k = 0x5A827999
                elif 20 <= i < 40:
                    f = b ^ c ^ d
                    k = 0x6ED9EBA1
                elif 40 <= i < 60:
                    f = (b & c) | (b & d) | (c & d)
                    k = 0x8F1BBCDC
                elif 60 <= i < 80:
                    f = b ^ c ^ d
                    k = 0xCA62C1D6
                a , b , c , d , e = (
                    self.rotate(a, 5 ) + f + e + k + expanded_block[i] & 0xFFFFFFFF,
                    a,
                    self.rotate(b, 30 ),
                    c,
                    d,
                )
            self.h = (
                self.h[0] + a & 0xFFFFFFFF,
                self.h[1] + b & 0xFFFFFFFF,
                self.h[2] + c & 0xFFFFFFFF,
                self.h[3] + d & 0xFFFFFFFF,
                self.h[4] + e & 0xFFFFFFFF,
            )
        return ("{:08x}" * 5).format(*self.h )
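# Known-answer check (standard SHA-1 test vector):
#   snake_case(b'abc').final_hash() == 'a9993e364706816aba3e25717850c26c9cd0d89d'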
def test_sha1_hash( ) -> None:
    test_string = b'Test String'
    assert snake_case(test_string ).final_hash() == hashlib.sha1(test_string ).hexdigest()  # noqa: S324
def main( ) -> None:
    parser = argparse.ArgumentParser(description='Process some strings or files' )
    parser.add_argument(
        '--string' ,dest='input_string' ,default='Hello World!! Welcome to Cryptography' ,help='Hash the string' ,)
    parser.add_argument('--file' ,dest='input_file' ,help='Hash contents of a file' )
    args = parser.parse_args()
    input_string = args.input_string
    # In any case hash input should be a bytestring
    if args.input_file:
        with open(args.input_file ,'rb' ) as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string ,'utf-8' )
    print(snake_case(hash_input ).final_hash() )
if __name__ == "__main__":
main()
import doctest
doctest.testmod()
"""simple docstring"""
import io
import math
from typing import Dict, Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import convert_to_rgb, normalize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
get_image_size,
infer_channel_dimension_format,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_vision_available, logging
from ...utils.import_utils import requires_backends
if is_vision_available():
import textwrap
from PIL import Image, ImageDraw, ImageFont
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
    is_torch_greater_or_equal_than_1_11 = False
logger = logging.get_logger(__name__)
DEFAULT_FONT_PATH = "ybelkada/fonts"
def _check_torch_version( ) -> None:
    """simple docstring"""
    if is_torch_available() and not is_torch_greater_or_equal_than_1_11:
        raise ImportError(
            f"""You are using torch=={torch.__version__}, but torch>=1.11.0 is required to use """
            """Pix2StructImageProcessor. Please upgrade torch.""" )
def torch_extract_patches( image_tensor , patch_height , patch_width ):
    """simple docstring"""
    requires_backends(torch_extract_patches , ["""torch"""] )
    _check_torch_version()
    image_tensor = image_tensor.unsqueeze(0 )
    patches = torch.nn.functional.unfold(image_tensor , (patch_height, patch_width) , stride=(patch_height, patch_width) )
    patches = patches.reshape(image_tensor.size(0 ) , image_tensor.size(1 ) , patch_height , patch_width , -1 )
    patches = patches.permute(0 , 4 , 2 , 3 , 1 ).reshape(
        image_tensor.size(2 ) // patch_height , image_tensor.size(3 ) // patch_width , image_tensor.size(1 ) * patch_height * patch_width , )
    return patches.unsqueeze(0 )
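# Output layout, for reference: (1, rows, columns, patch_height * patch_width * channels),
# i.e. a grid of flattened, non-overlapping patches.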
def render_text( text , text_size = 36 , text_color = "black" , background_color = "white" , left_padding = 5 , right_padding = 5 , top_padding = 5 , bottom_padding = 5 , font_bytes = None , font_path = None , ):
    """simple docstring"""
    requires_backends(render_text , """vision""" )
    # Add new lines so that each line is no more than 80 characters.
    wrapper = textwrap.TextWrapper(width=80 )
    lines = wrapper.wrap(text=text )
    wrapped_text = """\n""".join(lines )
    if font_bytes is not None and font_path is None:
        font = io.BytesIO(font_bytes )
    elif font_path is not None:
        font = font_path
    else:
        font = hf_hub_download(DEFAULT_FONT_PATH , """Arial.TTF""" )
    font = ImageFont.truetype(font , encoding="""UTF-8""" , size=text_size )
    # Use a temporary canvas to determine the width and height in pixels when
    # rendering the text.
    temp_draw = ImageDraw.Draw(Image.new("""RGB""" , (1, 1) , background_color ) )
    _ , _ , text_width , text_height = temp_draw.textbbox((0, 0) , wrapped_text , font )
    # Create the actual image with a bit of padding around the text.
    image_width = text_width + left_padding + right_padding
    image_height = text_height + top_padding + bottom_padding
    image = Image.new("""RGB""" , (image_width, image_height) , background_color )
    draw = ImageDraw.Draw(image )
    draw.text(xy=(left_padding, top_padding) , text=wrapped_text , fill=text_color , font=font )
    return image
def render_header( image , header , **kwargs ):
    """simple docstring"""
    requires_backends(render_header , """vision""" )
    # Convert to PIL image if necessary
    image = to_pil_image(image )
    header_image = render_text(header , **kwargs )
    new_width = max(header_image.width , image.width )
    new_height = int(image.height * (new_width / image.width) )
    new_header_height = int(header_image.height * (new_width / header_image.width) )
    new_image = Image.new("""RGB""" , (new_width, new_height + new_header_height) , """white""" )
    new_image.paste(header_image.resize((new_width, new_header_height) ) , (0, 0) )
    new_image.paste(image.resize((new_width, new_height) ) , (0, new_header_height) )
    # Convert back to the original framework if necessary
    new_image = to_numpy_array(new_image )
    if infer_channel_dimension_format(new_image ) == ChannelDimension.LAST:
        new_image = to_channel_dimension_format(new_image , ChannelDimension.LAST )
    return new_image
class a_ ( BaseImageProcessor ):
    model_input_names = ["flattened_patches"]
    def __init__( self , do_convert_rgb = True , do_normalize = True , patch_size = None , max_patches = 2048 , is_vqa = False , **kwargs , ):
        super().__init__(**kwargs )
        self.patch_size = patch_size if patch_size is not None else {"""height""": 16, """width""": 16}
        self.do_normalize = do_normalize
        self.do_convert_rgb = do_convert_rgb
        self.max_patches = max_patches
        self.is_vqa = is_vqa
def _SCREAMING_SNAKE_CASE ( self : Dict , snake_case__ : np.ndarray , snake_case__ : int , snake_case__ : dict , **snake_case__ : Union[str, Any] ):
requires_backends(self.extract_flattened_patches , """torch""" )
_check_torch_version()
# convert to torch
lowerCAmelCase__ = to_channel_dimension_format(_lowerCamelCase , ChannelDimension.FIRST )
lowerCAmelCase__ = torch.from_numpy(_lowerCamelCase )
lowerCAmelCase__ , lowerCAmelCase__ = patch_size["""height"""], patch_size["""width"""]
lowerCAmelCase__ , lowerCAmelCase__ = get_image_size(_lowerCamelCase )
# maximize scale s.t.
lowerCAmelCase__ = math.sqrt(max_patches * (patch_height / image_height) * (patch_width / image_width) )
lowerCAmelCase__ = max(min(math.floor(scale * image_height / patch_height ) , _lowerCamelCase ) , 1 )
lowerCAmelCase__ = max(min(math.floor(scale * image_width / patch_width ) , _lowerCamelCase ) , 1 )
lowerCAmelCase__ = max(num_feasible_rows * patch_height , 1 )
lowerCAmelCase__ = max(num_feasible_cols * patch_width , 1 )
lowerCAmelCase__ = torch.nn.functional.interpolate(
image.unsqueeze(0 ) , size=(resized_height, resized_width) , mode="""bilinear""" , align_corners=_lowerCamelCase , antialias=_lowerCamelCase , ).squeeze(0 )
# [1, rows, columns, patch_height * patch_width * image_channels]
lowerCAmelCase__ = torch_extract_patches(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
lowerCAmelCase__ = patches.shape
lowerCAmelCase__ = patches_shape[1]
lowerCAmelCase__ = patches_shape[2]
lowerCAmelCase__ = patches_shape[3]
# [rows * columns, patch_height * patch_width * image_channels]
lowerCAmelCase__ = patches.reshape([rows * columns, depth] )
# [rows * columns, 1]
lowerCAmelCase__ = torch.arange(_lowerCamelCase ).reshape([rows, 1] ).repeat(1 , _lowerCamelCase ).reshape([rows * columns, 1] )
lowerCAmelCase__ = torch.arange(_lowerCamelCase ).reshape([1, columns] ).repeat(_lowerCamelCase , 1 ).reshape([rows * columns, 1] )
# Offset by 1 so the ids do not contain zeros, which represent padding.
row_ids += 1
col_ids += 1
# Prepare additional patch features.
# [rows * columns, 1]
lowerCAmelCase__ = row_ids.to(torch.floataa )
lowerCAmelCase__ = col_ids.to(torch.floataa )
# [rows * columns, 2 + patch_height * patch_width * image_channels]
lowerCAmelCase__ = torch.cat([row_ids, col_ids, patches] , -1 )
# [max_patches, 2 + patch_height * patch_width * image_channels]
lowerCAmelCase__ = torch.nn.functional.pad(_lowerCamelCase , [0, 0, 0, max_patches - (rows * columns)] ).float()
lowerCAmelCase__ = to_numpy_array(_lowerCamelCase )
return result
def _SCREAMING_SNAKE_CASE ( self : Any , snake_case__ : np.ndarray , snake_case__ : Optional[Union[str, ChannelDimension]] = None , **snake_case__ : str ):
if image.dtype == np.uinta:
lowerCAmelCase__ = image.astype(np.floataa )
# take mean across the whole `image`
lowerCAmelCase__ = np.mean(_lowerCamelCase )
lowerCAmelCase__ = np.std(_lowerCamelCase )
lowerCAmelCase__ = max(_lowerCamelCase , 1.0 / math.sqrt(np.prod(image.shape ) ) )
return normalize(_lowerCamelCase , mean=_lowerCamelCase , std=_lowerCamelCase , **_lowerCamelCase )
def _SCREAMING_SNAKE_CASE ( self : int , snake_case__ : ImageInput , snake_case__ : Optional[str] = None , snake_case__ : bool = None , snake_case__ : Optional[bool] = None , snake_case__ : Optional[int] = None , snake_case__ : Optional[Dict[str, int]] = None , snake_case__ : Optional[Union[str, TensorType]] = None , snake_case__ : ChannelDimension = ChannelDimension.FIRST , **snake_case__ : int , ):
lowerCAmelCase__ = do_normalize if do_normalize is not None else self.do_normalize
lowerCAmelCase__ = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
lowerCAmelCase__ = patch_size if patch_size is not None else self.patch_size
lowerCAmelCase__ = max_patches if max_patches is not None else self.max_patches
lowerCAmelCase__ = self.is_vqa
if kwargs.get("""data_format""" , _lowerCamelCase ) is not None:
raise ValueError("""data_format is not an accepted input as the outputs are """ )
lowerCAmelCase__ = make_list_of_images(_lowerCamelCase )
if not valid_images(_lowerCamelCase ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
lowerCAmelCase__ = [convert_to_rgb(_lowerCamelCase ) for image in images]
# All transformations expect numpy arrays.
lowerCAmelCase__ = [to_numpy_array(_lowerCamelCase ) for image in images]
if is_vqa:
if header_text is None:
raise ValueError("""A header text must be provided for VQA models.""" )
lowerCAmelCase__ = kwargs.pop("""font_bytes""" , _lowerCamelCase )
lowerCAmelCase__ = kwargs.pop("""font_path""" , _lowerCamelCase )
if isinstance(_lowerCamelCase , _lowerCamelCase ):
lowerCAmelCase__ = [header_text] * len(_lowerCamelCase )
lowerCAmelCase__ = [
render_header(_lowerCamelCase , header_text[i] , font_bytes=_lowerCamelCase , font_path=_lowerCamelCase )
for i, image in enumerate(_lowerCamelCase )
]
if do_normalize:
lowerCAmelCase__ = [self.normalize(image=_lowerCamelCase ) for image in images]
# convert to torch tensor and permute
lowerCAmelCase__ = [
self.extract_flattened_patches(image=_lowerCamelCase , max_patches=_lowerCamelCase , patch_size=_lowerCamelCase )
for image in images
]
# create attention mask in numpy
lowerCAmelCase__ = [(image.sum(axis=-1 ) != 0).astype(np.floataa ) for image in images]
lowerCAmelCase__ = BatchFeature(
data={"""flattened_patches""": images, """attention_mask""": attention_masks} , tensor_type=_lowerCamelCase )
return encoded_outputs
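# Illustrative usage (guarded so importing this module stays cheap); the image
# size and max_patches value below are example assumptions, not from this file.
if __name__ == "__main__":
    from PIL import Image as _Image

    _processor = Pix2StructImageProcessor(max_patches=512)
    _image = _Image.new("RGB", (320, 240), "white")
    _encoded = _processor.preprocess(_image, return_tensors="np")
    # each row is [row_id, col_id, flattened 16*16*3 patch] -> 2 + 768 columns
    print(_encoded["flattened_patches"].shape)  # (1, 512, 770)
    print(_encoded["attention_mask"].shape)  # (1, 512)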
"""simple docstring"""
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("TEST_SAGEMAKER" , "False" ) ) is not True , reason="Skipping test because should only be run when releasing minor transformers version" , )
@pytest.mark.usefixtures("sm_env" )
@parameterized_class(
[
{
"framework": "pytorch",
"script": "run_glue_model_parallelism.py",
"model_name_or_path": "roberta-large",
"instance_type": "ml.p3dn.24xlarge",
"results": {"train_runtime": 1600, "eval_accuracy": 0.3, "eval_loss": 1.2},
},
{
"framework": "pytorch",
"script": "run_glue.py",
"model_name_or_path": "roberta-large",
"instance_type": "ml.p3dn.24xlarge",
"results": {"train_runtime": 1600, "eval_accuracy": 0.3, "eval_loss": 1.2},
},
] )
class MultiNodeTest(unittest.TestCase):
    def setUp(self):
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding="utf-8",
                check=True,
            )
        assert hasattr(self, "env")
    def create_estimator(self, instance_count):
        # configuration for running training on smdistributed Model Parallel
        mpi_options = {
            "enabled": True,
            "processes_per_host": 8,
        }
        smp_options = {
            "enabled": True,
            "parameters": {
                "microbatches": 4,
                "placement_strategy": "spread",
                "pipeline": "interleaved",
                "optimize": "speed",
                "partitions": 4,
                "ddp": True,
            },
        }
        distribution = {"smdistributed": {"modelparallel": smp_options}, "mpi": mpi_options}
        name_extension = "trainer" if self.script == "run_glue.py" else "smtrainer"
        # creates estimator
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=f"{self.env.base_job_name}-{instance_count}-smp-{name_extension}",
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={
                **self.env.hyperparameters,
                "model_name_or_path": self.model_name_or_path,
                "max_steps": 500,
            },
            metric_definitions=self.env.metric_definitions,
            distribution=distribution,
            py_version="py36",
        )
    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")
    @parameterized.expand([(1,)])
    def test_script(self, instance_count):
        # create estimator
        estimator = self.create_estimator(instance_count)
        # run training
        estimator.fit()
        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()
        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999)
        )
        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)
        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
import enum
import warnings
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
logger = logging.get_logger(__name__)


class ReturnType(enum.Enum):
    TENSORS = 0
    TEXT = 1


@add_end_docstrings(PIPELINE_INIT_ARGS)
class Text2TextGenerationPipeline(Pipeline):
    # Used in the return key of the pipeline.
    return_name = "generated"

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.check_model_type(
            TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
        )
    def _sanitize_parameters(
        self,
        return_tensors=None,
        return_text=None,
        return_type=None,
        clean_up_tokenization_spaces=None,
        truncation=None,
        stop_sequence=None,
        **generate_kwargs,
    ):
        preprocess_params = {}
        if truncation is not None:
            preprocess_params["truncation"] = truncation
        forward_params = generate_kwargs
        postprocess_params = {}
        if return_tensors is not None and return_type is None:
            return_type = ReturnType.TENSORS if return_tensors else ReturnType.TEXT
        if return_type is not None:
            postprocess_params["return_type"] = return_type
        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces
        if stop_sequence is not None:
            stop_sequence_ids = self.tokenizer.encode(stop_sequence, add_special_tokens=False)
            if len(stop_sequence_ids) > 1:
                warnings.warn(
                    "Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
                    " the stop sequence will be used as the stop sequence string in the interim."
                )
            generate_kwargs["eos_token_id"] = stop_sequence_ids[0]
        return preprocess_params, forward_params, postprocess_params
    def check_inputs(self, input_length: int, min_length: int, max_length: int):
        """Checks whether there might be something wrong with the given input."""
        return True

    def _parse_and_tokenize(self, *args, truncation):
        prefix = self.model.config.prefix if self.model.config.prefix is not None else ""
        if isinstance(args[0], list):
            if self.tokenizer.pad_token_id is None:
                raise ValueError("Please make sure that the tokenizer has a pad_token_id when using a batch input")
            args = ([prefix + arg for arg in args[0]],)
            padding = True
        elif isinstance(args[0], str):
            args = (prefix + args[0],)
            padding = False
        else:
            raise ValueError(
                f" `args[0]`: {args[0]} have the wrong format. The should be either of type `str` or type `list`"
            )
        inputs = self.tokenizer(*args, padding=padding, truncation=truncation, return_tensors=self.framework)
        # This is produced by tokenizers but is an invalid generate kwargs
        if "token_type_ids" in inputs:
            del inputs["token_type_ids"]
        return inputs
    def __call__(self, *args, **kwargs):
        result = super().__call__(*args, **kwargs)
        if (
            isinstance(args[0], list)
            and all(isinstance(el, str) for el in args[0])
            and all(len(res) == 1 for res in result)
        ):
            return [res[0] for res in result]
        return result

    def preprocess(self, inputs, truncation=TruncationStrategy.DO_NOT_TRUNCATE, **kwargs):
        inputs = self._parse_and_tokenize(inputs, truncation=truncation, **kwargs)
        return inputs

    def _forward(self, model_inputs, **generate_kwargs):
        if self.framework == "pt":
            in_b, input_length = model_inputs["input_ids"].shape
        elif self.framework == "tf":
            in_b, input_length = tf.shape(model_inputs["input_ids"]).numpy()
        generate_kwargs["min_length"] = generate_kwargs.get("min_length", self.model.config.min_length)
        generate_kwargs["max_length"] = generate_kwargs.get("max_length", self.model.config.max_length)
        self.check_inputs(input_length, generate_kwargs["min_length"], generate_kwargs["max_length"])
        output_ids = self.model.generate(**model_inputs, **generate_kwargs)
        out_b = output_ids.shape[0]
        if self.framework == "pt":
            output_ids = output_ids.reshape(in_b, out_b // in_b, *output_ids.shape[1:])
        elif self.framework == "tf":
            output_ids = tf.reshape(output_ids, (in_b, out_b // in_b, *output_ids.shape[1:]))
        return {"output_ids": output_ids}

    def postprocess(self, model_outputs, return_type=ReturnType.TEXT, clean_up_tokenization_spaces=False):
        records = []
        for output_ids in model_outputs["output_ids"][0]:
            if return_type == ReturnType.TENSORS:
                record = {f"{self.return_name}_token_ids": output_ids}
            elif return_type == ReturnType.TEXT:
                record = {
                    f"{self.return_name}_text": self.tokenizer.decode(
                        output_ids,
                        skip_special_tokens=True,
                        clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                    )
                }
            records.append(record)
        return records
@add_end_docstrings(PIPELINE_INIT_ARGS)
class SummarizationPipeline(Text2TextGenerationPipeline):
    # Used in the return key of the pipeline.
    return_name = "summary"

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)

    def check_inputs(self, input_length: int, min_length: int, max_length: int) -> bool:
        if max_length < min_length:
            logger.warning(f"Your min_length={min_length} must be inferior than your max_length={max_length}.")
        if input_length < max_length:
            logger.warning(
                f"Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is "
                "a summarization task, where outputs shorter than the input are typically wanted, you might "
                f"consider decreasing max_length manually, e.g. summarizer('...', max_length={input_length//2})"
            )


@add_end_docstrings(PIPELINE_INIT_ARGS)
class TranslationPipeline(Text2TextGenerationPipeline):
    # Used in the return key of the pipeline.
    return_name = "translation"

    def check_inputs(self, input_length: int, min_length: int, max_length: int):
        if input_length > 0.9 * max_length:
            logger.warning(
                f"Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider "
                "increasing your max_length manually, e.g. translator('...', max_length=400)"
            )
        return True

    def preprocess(self, *args, truncation=TruncationStrategy.DO_NOT_TRUNCATE, src_lang=None, tgt_lang=None):
        if getattr(self.tokenizer, "_build_translation_inputs", None):
            return self.tokenizer._build_translation_inputs(
                *args, return_tensors=self.framework, truncation=truncation, src_lang=src_lang, tgt_lang=tgt_lang
            )
        else:
            return super()._parse_and_tokenize(*args, truncation=truncation)

    def _sanitize_parameters(self, src_lang=None, tgt_lang=None, **kwargs):
        preprocess_params, forward_params, postprocess_params = super()._sanitize_parameters(**kwargs)
        if src_lang is not None:
            preprocess_params["src_lang"] = src_lang
        if tgt_lang is not None:
            preprocess_params["tgt_lang"] = tgt_lang
        if src_lang is None and tgt_lang is None:
            # Backward compatibility, direct arguments use is preferred.
            task = kwargs.get("task", self.task)
            items = task.split("_")
            if task and len(items) == 4:
                # translation, XX, to YY
                preprocess_params["src_lang"] = items[1]
                preprocess_params["tgt_lang"] = items[3]
        return preprocess_params, forward_params, postprocess_params

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)
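# Illustrative usage of the pipelines above (checkpoint names are examples,
# not taken from this file):
#
#     from transformers import pipeline
#
#     summarizer = pipeline("summarization", model="sshleifer/distilbart-cnn-12-6")
#     summarizer("Long article ...", max_length=60)[0]["summary_text"]
#
#     translator = pipeline("translation_en_to_fr", model="t5-small")
#     translator("How are you?")[0]["translation_text"]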
"""simple docstring"""
import os
import tempfile
import unittest
from transformers import NezhaConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
)
from transformers.models.nezha.modeling_nezha import NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST
class NezhaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=128,
        max_relative_position=32,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.max_relative_position = max_relative_position
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return NezhaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NezhaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = NezhaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NezhaForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_next_sequence_prediction(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NezhaForNextSentencePrediction(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

    def create_and_check_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NezhaForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            next_sentence_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NezhaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = NezhaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = NezhaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = NezhaForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class NezhaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            NezhaModel,
            NezhaForMaskedLM,
            NezhaForMultipleChoice,
            NezhaForNextSentencePrediction,
            NezhaForPreTraining,
            NezhaForQuestionAnswering,
            NezhaForSequenceClassification,
            NezhaForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": NezhaModel,
            "fill-mask": NezhaForMaskedLM,
            "question-answering": NezhaForQuestionAnswering,
            "text-classification": NezhaForSequenceClassification,
            "token-classification": NezhaForTokenClassification,
            "zero-shot": NezhaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = NezhaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=NezhaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)

    def test_model_as_decoder_with_default_input_mask(self):
        # This regression test was failing with PyTorch < 1.3
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = NezhaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @slow
    @require_torch_gpu
    def test_torchscript_device_change(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # NezhaForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == NezhaForMultipleChoice:
                return
            config.torchscript = True
            model = model_class(config=config)
            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu"))
            )
            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "bert.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "bert.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))
@require_torch
class NezhaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_nezha_model(self):
        model = NezhaModel.from_pretrained("sijunhe/nezha-cn-base")
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 6, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[0.0685, 0.2441, 0.1102], [0.0600, 0.1906, 0.1349], [0.0221, 0.0819, 0.0586]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))

    @slow
    def test_inference_nezha_masked_lm(self):
        model = NezhaForMaskedLM.from_pretrained("sijunhe/nezha-cn-base")
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
        attention_mask = torch.tensor([[1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 6, 21128))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[-2.7939, -1.7902, -2.2189], [-2.8585, -1.8908, -2.3723], [-2.6499, -1.7750, -2.2558]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
'''simple docstring'''
class Graph:
    def __init__(self) -> None:
        self.vertex = {}

    def print_graph(self) -> None:
        print(self.vertex)
        for i in self.vertex:
            print(i, " -> ", " -> ".join([str(j) for j in self.vertex[i]]))

    def add_edge(self, from_vertex: int, to_vertex: int) -> None:
        if from_vertex in self.vertex:
            self.vertex[from_vertex].append(to_vertex)
        else:
            # else make a new vertex
            self.vertex[from_vertex] = [to_vertex]

    def dfs(self) -> None:
        visited = [False] * len(self.vertex)
        # call the recursive helper function
        for i in range(len(self.vertex)):
            if not visited[i]:
                self.dfs_recursive(i, visited)

    def dfs_recursive(self, start_vertex: int, visited: list) -> None:
        # mark the current vertex as visited
        visited[start_vertex] = True
        print(start_vertex, end=" ")
        # Recur for all the vertices that are adjacent to this node
        for i in self.vertex[start_vertex]:
            if not visited[i]:
                self.dfs_recursive(i, visited)
if __name__ == "__main__":
    g = Graph()
g.add_edge(0, 1)
g.add_edge(0, 2)
g.add_edge(1, 2)
g.add_edge(2, 0)
g.add_edge(2, 3)
g.add_edge(3, 3)
g.print_graph()
print('DFS:')
g.dfs()
# OUTPUT:
# 0 -> 1 -> 2
# 1 -> 2
# 2 -> 0 -> 3
# 3 -> 3
# DFS:
# 0 1 2 3
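# A non-recursive variant (illustrative addition, not part of the original
# script): the same traversal with an explicit stack instead of recursion.
def dfs_iterative(graph: dict, start: int) -> list:
    visited, stack, order = set(), [start], []
    while stack:
        vertex = stack.pop()
        if vertex in visited:
            continue
        visited.add(vertex)
        order.append(vertex)
        # push neighbours in reverse so they pop in insertion order
        stack.extend(reversed(graph.get(vertex, [])))
    return order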
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class _lowerCAmelCase(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class _lowerCAmelCase(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class _lowerCAmelCase(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class _lowerCAmelCase(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class _lowerCAmelCase(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class _lowerCAmelCase(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class _lowerCAmelCase(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class _lowerCAmelCase(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class _lowerCAmelCase(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class _lowerCAmelCase(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class _lowerCAmelCase(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class _lowerCAmelCase(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class _lowerCAmelCase(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])
def neville_interpolate(x_points: list, y_points: list, x0: float) -> list:
    """
    Interpolate and evaluate a polynomial at x0 using Neville's method.
    Returns the interpolated value and the full table of intermediate values.
    """
    n = len(x_points)
    q = [[0] * n for i in range(n)]
    for i in range(n):
        q[i][1] = y_points[i]
    for i in range(2, n):
        for j in range(i, n):
            q[j][i] = (
                (x0 - x_points[j - i + 1]) * q[j][i - 1]
                - (x0 - x_points[j]) * q[j - 1][i - 1]
            ) / (x_points[j] - x_points[j - i + 1])
    return [q[n - 1][n - 1], q]
if __name__ == "__main__":
import doctest
doctest.testmod()
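# Quick sanity check (illustrative): a quadratic is reproduced exactly by the
# recurrence, so interpolating x**2 at x0 = 5 yields 25.0.
if __name__ == "__main__":
    value, _table = neville_interpolate([1, 2, 3, 4], [1, 4, 9, 16], 5)
    print(value)  # 25.0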
import logging
import os
from typing import List, Tuple
import numpy as np
import psutil
import torch
import torch.distributed as dist
from transformers import RagRetriever
__a :Any = logging.getLogger(__name__)
class _a ( snake_case_ ):
"""simple docstring"""
def __init__( self : Dict , UpperCAmelCase : List[Any] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : str=None ):
super().__init__(
UpperCAmelCase , question_encoder_tokenizer=UpperCAmelCase , generator_tokenizer=UpperCAmelCase , index=UpperCAmelCase , init_retrieval=UpperCAmelCase , )
A_ = None
def __A ( self : Dict , UpperCAmelCase : int ):
logger.info("initializing retrieval" )
# initializing a separate process group for retrieval as the default
# nccl backend doesn't support gather/scatter operations while gloo
# is too slow to replace nccl for the core gpu communication
if dist.is_initialized():
logger.info("dist initialized" )
# needs to be set manually
A_ = self._infer_socket_ifname()
# avoid clash with the NCCL port
A_ = str(distributed_port + 1 )
A_ = dist.new_group(ranks=UpperCAmelCase , backend="gloo" )
# initialize retriever only on the main worker
if not dist.is_initialized() or self._is_main():
logger.info("dist not initialized / main" )
self.index.init_index()
# all processes wait untill the retriever is initialized by the main process
if dist.is_initialized():
torch.distributed.barrier(group=self.process_group )
def __A ( self : List[str] ):
return dist.get_rank(group=self.process_group ) == 0
def __A ( self : Any , UpperCAmelCase : List[Any] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Dict=torch.floataa ):
A_ = torch.empty(UpperCAmelCase , dtype=UpperCAmelCase )
dist.scatter(UpperCAmelCase , src=0 , scatter_list=UpperCAmelCase , group=self.process_group )
return target_tensor
def __A ( self : Any ):
A_ = psutil.net_if_addrs()
# a hacky way to deal with varying network interface names
A_ = next((addr for addr in addrs if addr.startswith("e" )) , UpperCAmelCase )
return ifname
def __A ( self : Tuple , UpperCAmelCase : np.ndarray , UpperCAmelCase : int ):
# single GPU training
if not dist.is_initialized():
A_ , A_ = self._main_retrieve(UpperCAmelCase , UpperCAmelCase )
return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(UpperCAmelCase )
# distributed training
A_ = dist.get_world_size(group=self.process_group )
# gather logic
A_ = None
if self._is_main():
A_ = [torch.empty(question_hidden_states.shape , dtype=torch.floataa ) for _ in range(UpperCAmelCase )]
dist.gather(torch.tensor(UpperCAmelCase ) , dst=0 , gather_list=UpperCAmelCase , group=self.process_group )
# scatter logic
A_ = question_hidden_states.shape[0]
A_ = []
A_ = []
if self._is_main():
assert len(UpperCAmelCase ) == world_size
A_ , A_ = self._main_retrieve(torch.cat(UpperCAmelCase ).numpy() , UpperCAmelCase )
A_ , A_ = torch.tensor(UpperCAmelCase ), torch.tensor(UpperCAmelCase )
A_ = self._chunk_tensor(UpperCAmelCase , UpperCAmelCase )
A_ = self._chunk_tensor(UpperCAmelCase , UpperCAmelCase )
A_ = self._scattered(UpperCAmelCase , [n_queries, n_docs] , target_type=torch.intaa )
A_ = self._scattered(UpperCAmelCase , [n_queries, n_docs, question_hidden_states.shape[1]] )
return retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(UpperCAmelCase )
"""simple docstring"""
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class LanguageModeling(TaskTemplate):
    # `task` is serialized by asdict() even when it equals the default.
    task: str = field(default="language-modeling", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({})
    text_column: str = "text"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.text_column: "text"}
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class GLPNImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size_divisor: int = 32,
        resample=PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        **kwargs,
    ) -> None:
        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.size_divisor = size_divisor
        self.resample = resample
        super().__init__(**kwargs)

    def resize(self, image, size_divisor, resample, data_format=None, **kwargs):
        height, width = get_image_size(image)
        # Rounds the height and width down to the closest multiple of size_divisor
        new_h = height // size_divisor * size_divisor
        new_w = width // size_divisor * size_divisor
        image = resize(image, (new_h, new_w), resample=resample, data_format=data_format, **kwargs)
        return image

    def rescale(self, image, scale, data_format=None, **kwargs):
        return rescale(image=image, scale=scale, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images,
        do_resize=None,
        do_rescale=None,
        size_divisor=None,
        resample=None,
        return_tensors=None,
        data_format=ChannelDimension.FIRST,
        **kwargs,
    ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        size_divisor = size_divisor if size_divisor is not None else self.size_divisor
        resample = resample if resample is not None else self.resample
        if do_resize and size_divisor is None:
            raise ValueError("size_divisor is required for resizing")
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError("Invalid image(s)")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(img) for img in images]
        if do_resize:
            images = [self.resize(image, size_divisor=size_divisor, resample=resample) for image in images]
        if do_rescale:
            images = [self.rescale(image, scale=1 / 255) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
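# Illustrative usage (guarded; values are example assumptions): spatial
# dimensions are floored to the nearest multiple of `size_divisor`.
if __name__ == "__main__":
    processor = GLPNImageProcessor(size_divisor=32)
    out = processor.preprocess(np.zeros((3, 250, 333), dtype=np.uint8), return_tensors="np")
    print(out["pixel_values"].shape)  # (1, 3, 224, 320)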
def net_present_value(discount_rate: float, cash_flows: list[float]) -> float:
    """Discount each cash flow back to the present and sum the results."""
    if discount_rate < 0:
        raise ValueError("Discount rate cannot be negative")
    if not cash_flows:
        raise ValueError("Cash flows list cannot be empty")
    present_value = sum(
        cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(cash_flows)
    )
    return round(present_value, ndigits=2)
if __name__ == "__main__":
import doctest
doctest.testmod()
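# Worked example (illustrative): 100 today plus 100 in one year at a 10%
# discount rate is 100 + 100 / 1.1 = 190.91.
if __name__ == "__main__":
    print(net_present_value(0.10, [100, 100]))  # 190.91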
'''simple docstring'''
def topological_sort(graph: dict) -> None:
    """Kahn's algorithm: repeatedly emit vertices whose indegree drops to zero."""
    indegree = [0] * len(graph)
    queue = []
    topo = []
    cnt = 0
    for values in graph.values():
        for i in values:
            indegree[i] += 1
    for i in range(len(graph)):
        if indegree[i] == 0:
            queue.append(i)
    while queue:
        vertex = queue.pop(0)
        cnt += 1
        topo.append(vertex)
        for x in graph[vertex]:
            indegree[x] -= 1
            if indegree[x] == 0:
                queue.append(x)
    if cnt != len(graph):
        print("Cycle exists")
    else:
        print(topo)


# Adjacency List of Graph
graph = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
topological_sort(graph)
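# A cyclic graph exercises the failure branch (illustrative):
topological_sort({0: [1], 1: [2], 2: [0]})  # prints "Cycle exists"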
import importlib
import shutil
import threading
import warnings
from typing import List
import fsspec
import fsspec.asyn
from . import compression
from .hffilesystem import HfFileSystem
lowercase__ : Any = importlib.util.find_spec("s3fs") is not None
if _has_safs:
from .safilesystem import SaFileSystem # noqa: F401
lowercase__ : List[compression.BaseCompressedFileFileSystem] = [
compression.BzaFileSystem,
compression.GzipFileSystem,
compression.LzaFileSystem,
compression.XzFileSystem,
compression.ZstdFileSystem,
]
# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
warnings.warn(F"A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.")
fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)
def extract_path_from_uri(dataset_path: str) -> str:
    """Strip the protocol prefix (e.g. "s3://") from a dataset path."""
    if "://" in dataset_path:
        dataset_path = dataset_path.split("://")[1]
    return dataset_path


def is_remote_filesystem(fs: fsspec.AbstractFileSystem) -> bool:
    """Return True when `fs` points at anything other than the local filesystem."""
    if fs is not None and fs.protocol != "file":
        return True
    else:
        return False


def rename(fs: fsspec.AbstractFileSystem, src: str, dst: str):
    is_local = not is_remote_filesystem(fs)
    if is_local:
        # LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
        shutil.move(fs._strip_protocol(src), fs._strip_protocol(dst))
    else:
        fs.mv(src, dst, recursive=True)


def _reset_fsspec_lock() -> None:
    if hasattr(fsspec.asyn, "reset_lock"):
        # for future fsspec>2022.05.0
        fsspec.asyn.reset_lock()
    else:
        fsspec.asyn.iothread[0] = None
        fsspec.asyn.loop[0] = None
        fsspec.asyn.lock = threading.Lock()
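# Illustrative behavior of the helpers above (values are examples):
#   extract_path_from_uri("s3://bucket/data/train.csv") -> "bucket/data/train.csv"
#   extract_path_from_uri("relative/path") -> "relative/path"
#   is_remote_filesystem(fsspec.filesystem("file")) -> False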
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotImageClassificationPipeline(Pipeline):
    """Zero-shot image classification: score an image against free-form candidate labels."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
        )

    def __call__(self, images, **kwargs):
        return super().__call__(images, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]
        return preprocess_params, {}, {}

    def preprocess(self, image, candidate_labels=None, hypothesis_template="This is a photo of {}."):
        image = load_image(image)
        inputs = self.image_processor(images=[image], return_tensors=self.framework)
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["text_inputs"] = [text_inputs]
        return inputs

    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop("candidate_labels")
        text_inputs = model_inputs.pop("text_inputs")
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]
        outputs = self.model(**text_inputs, **model_inputs)
        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_image,
        }
        return model_outputs

    def postprocess(self, model_outputs):
        candidate_labels = model_outputs.pop("candidate_labels")
        logits = model_outputs["logits"][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=-1).squeeze(-1)
            scores = probs.tolist()
            if not isinstance(scores, list):
                scores = [scores]
        elif self.framework == "tf":
            probs = stable_softmax(logits, axis=-1)
            scores = probs.numpy().tolist()
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")
        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
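# Usage sketch (my addition; the checkpoint name is an assumption, any CLIP-like
# model works):
#   from transformers import pipeline
#   classifier = pipeline("zero-shot-image-classification",
#                         model="openai/clip-vit-base-patch32")
#   classifier("cat.png", candidate_labels=["cat", "dog"])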
| 451
| 1
|
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class LevitImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"shortest_edge": 18}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "do_center_crop": self.do_center_crop,
            "size": self.size,
            "crop_size": self.crop_size,
        }
@require_torch
@require_vision
class LevitImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = LevitImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = LevitImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
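# To run this module locally (my addition; the path assumes the usual
# transformers test layout and may differ):
#   python -m pytest tests/models/levit/test_image_processing_levit.py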
| 146
|
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class KarrasVePipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = KarrasVeScheduler()
        pipe = KarrasVePipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=2, generator=generator, output_type="numpy").images
        generator = torch.manual_seed(0)
        image_from_tuple = pipe(num_inference_steps=2, generator=generator, output_type="numpy", return_dict=False)[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2


@slow
@require_torch
class KarrasVePipelineIntegrationTests(unittest.TestCase):
    def test_inference(self):
        model_id = "google/ncsnpp-celebahq-256"
        model = UNet2DModel.from_pretrained(model_id)
        scheduler = KarrasVeScheduler()
        pipe = KarrasVePipeline(unet=model, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=20, generator=generator, output_type="numpy").images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.578, 0.5811, 0.5924, 0.5809, 0.587, 0.5886, 0.5861, 0.5802, 0.586])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 146
| 1
|
from scipy.stats import spearmanr
import datasets
lowerCamelCase__ : Union[str, Any] = """
The Spearman rank-order correlation coefficient is a measure of the
relationship between two datasets. Like other correlation coefficients,
this one varies between -1 and +1 with 0 implying no correlation.
Positive correlations imply that as data in dataset x increases, so
does data in dataset y. Negative correlations imply that as x increases,
y decreases. Correlations of -1 or +1 imply an exact monotonic relationship.
Unlike the Pearson correlation, the Spearman correlation does not
assume that both datasets are normally distributed.
The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Spearman correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
"""
lowerCamelCase__ : int = """
Args:
predictions (`List[float]`): Predicted labels, as returned by a model.
references (`List[float]`): Ground truth labels.
return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns
only the spearmanr score. Defaults to `False`.
Returns:
spearmanr (`float`): Spearman correlation coefficient.
p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.
Examples:
Example 1:
>>> spearmanr_metric = datasets.load_metric(\"spearmanr\")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])
>>> print(results)
{'spearmanr': -0.7}
Example 2:
>>> spearmanr_metric = datasets.load_metric(\"spearmanr\")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],
... predictions=[10, 9, 2.5, 6, 4],
... return_pvalue=True)
>>> print(results['spearmanr'])
-0.7
>>> print(round(results['spearmanr_pvalue'], 2))
0.19
"""
lowerCamelCase__ : str = R"""\
@book{kokoska2000crc,
title={CRC standard probability and statistics tables and formulae},
author={Kokoska, Stephen and Zwillinger, Daniel},
year={2000},
publisher={Crc Press}
}
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Spearmanr(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("float"),
                    "references": datasets.Value("float"),
                }
            ),
            reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"],
        )

    def _compute(self, predictions, references, return_pvalue=False):
        results = spearmanr(references, predictions)
        if return_pvalue:
            return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
        else:
            return {"spearmanr": results[0]}
| 708
|
from __future__ import annotations
from collections.abc import Iterator
from typing import Any
class Node:
    def __init__(self, data: Any):
        self.data: Any = data
        self.next: Node | None = None


class CircularLinkedList:
    def __init__(self):
        self.head = None  # first node of the list
        self.tail = None  # last node; its .next points back to head

    def __iter__(self) -> Iterator[Any]:
        node = self.head
        while self.head:
            yield node.data
            node = node.next
            if node == self.head:
                break

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __repr__(self):
        return "->".join(str(item) for item in iter(self))

    def insert_tail(self, data: Any) -> None:
        self.insert_nth(len(self), data)

    def insert_head(self, data: Any) -> None:
        self.insert_nth(0, data)

    def insert_nth(self, index: int, data: Any) -> None:
        if index < 0 or index > len(self):
            raise IndexError("list index out of range.")
        new_node = Node(data)
        if self.head is None:
            new_node.next = new_node  # first node points itself
            self.tail = self.head = new_node
        elif index == 0:  # insert at head
            new_node.next = self.head
            self.head = self.tail.next = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node
            if index == len(self) - 1:  # insert at tail
                self.tail = new_node

    def delete_front(self):
        return self.delete_nth(0)

    def delete_tail(self) -> Any:
        return self.delete_nth(len(self) - 1)

    def delete_nth(self, index: int = 0) -> Any:
        if not 0 <= index < len(self):
            raise IndexError("list index out of range.")
        delete_node = self.head
        if self.head == self.tail:  # just one node
            self.head = self.tail = None
        elif index == 0:  # delete head node
            self.tail.next = self.tail.next.next
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
            if index == len(self) - 1:  # delete at tail
                self.tail = temp
        return delete_node.data

    def is_empty(self) -> bool:
        return len(self) == 0
def test_circular_linked_list() -> None:
    """Exercise insertion, deletion and bounds checking on the list above."""
    circular_linked_list = CircularLinkedList()
    assert len(circular_linked_list) == 0
    assert circular_linked_list.is_empty() is True
    assert str(circular_linked_list) == ""
    try:
        circular_linked_list.delete_front()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen
    try:
        circular_linked_list.delete_tail()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen
    try:
        circular_linked_list.delete_nth(-1)
        raise AssertionError
    except IndexError:
        assert True
    try:
        circular_linked_list.delete_nth(0)
        raise AssertionError
    except IndexError:
        assert True
    assert circular_linked_list.is_empty() is True
    for i in range(5):
        assert len(circular_linked_list) == i
        circular_linked_list.insert_nth(i, i + 1)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))
    circular_linked_list.insert_tail(6)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 7))
    circular_linked_list.insert_head(0)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(0, 7))
    assert circular_linked_list.delete_front() == 0
    assert circular_linked_list.delete_tail() == 6
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))
    assert circular_linked_list.delete_nth(2) == 3
    circular_linked_list.insert_nth(2, 3)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))
    assert circular_linked_list.is_empty() is False
if __name__ == "__main__":
import doctest
doctest.testmod()
| 495
| 0
|
"""simple docstring"""
def solution() -> int:
    """Count the Sundays that fell on the first of the month during the
    twentieth century (1 Jan 1901 to 31 Dec 2000), Project Euler problem 19."""
    days_per_month = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
    day = 6
    month = 1
    year = 1901
    sundays = 0
    while year < 2001:
        day += 7
        if (year % 4 == 0 and year % 100 != 0) or (year % 400 == 0):
            if day > days_per_month[month - 1] and month != 2:
                month += 1
                day = day - days_per_month[month - 2]
            elif day > 29 and month == 2:
                month += 1
                day = day - 29
        else:
            if day > days_per_month[month - 1]:
                month += 1
                day = day - days_per_month[month - 2]
        if month > 12:
            year += 1
            month = 1
        if year < 2001 and day == 1:
            sundays += 1
    return sundays
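# The published answer to Project Euler problem 19 is 171.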
if __name__ == "__main__":
print(solution())
| 589
|
"""simple docstring"""
import argparse
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def convert_roberta_prelayernorm_checkpoint_to_pytorch(checkpoint_repo: str, pytorch_dump_folder_path: str):
    """Copy/paste/tweak the original checkpoint's weights to our layout."""
    config = RobertaPreLayerNormConfig.from_pretrained(
        checkpoint_repo, architectures=["RobertaPreLayerNormForMaskedLM"]
    )
    # convert state_dict
    original_state_dict = torch.load(hf_hub_download(repo_id=checkpoint_repo, filename="pytorch_model.bin"))
    state_dict = {}
    for tensor_key, tensor_value in original_state_dict.items():
        # The transformer implementation gives the model a unique name, rather than overwriting 'roberta'
        if tensor_key.startswith("roberta."):
            tensor_key = "roberta_prelayernorm." + tensor_key[len("roberta.") :]
        # The original implementation contains weights which are not used, remove them from the state_dict
        if tensor_key.endswith(".self.LayerNorm.weight") or tensor_key.endswith(".self.LayerNorm.bias"):
            continue
        state_dict[tensor_key] = tensor_value

    model = RobertaPreLayerNormForMaskedLM.from_pretrained(
        pretrained_model_name_or_path=None, config=config, state_dict=state_dict
    )
    model.save_pretrained(pytorch_dump_folder_path)
    # convert tokenizer
    tokenizer = AutoTokenizer.from_pretrained(checkpoint_repo)
    tokenizer.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint-repo""",
default=None,
type=str,
required=True,
help="""Path the official PyTorch dump, e.g. 'andreasmadsen/efficient_mlm_m0.40'.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
    args = parser.parse_args()
convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
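# Example invocation (the output path is a placeholder):
#   python convert_roberta_prelayernorm_original_pytorch_checkpoint_to_pytorch.py \
#       --checkpoint-repo andreasmadsen/efficient_mlm_m0.40 \
#       --pytorch_dump_folder_path ./roberta_prelayernorm_dump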
| 589
| 1
|
"""simple docstring"""
import collections
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "prophetnet.tokenizer"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "microsoft/xprophetnet-large-wiki100-cased": (
            "https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/prophetnet.tokenizer"
        ),
    }
}

PRETRAINED_INIT_CONFIGURATION = {
    "microsoft/xprophetnet-large-wiki100-cased": {"do_lower_case": False},
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "microsoft/xprophetnet-large-wiki100-cased": 512,
}
def load_vocab(vocab_file):
    """Loads a vocabulary file into a dictionary."""
    vocab = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens):
        token = token.rstrip("\n")
        vocab[token] = index
    return vocab
class XLMProphetNetTokenizer(PreTrainedTokenizer):
    """SentencePiece-based tokenizer for XLM-ProphetNet, with a fairseq-style
    offset between the sp_model ids and the embedding vocabulary."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="[SEP]",
        eos_token="[SEP]",
        sep_token="[SEP]",
        unk_token="[UNK]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ):
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            unk_token=unk_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        try:
            import sentencepiece as spm
        except ImportError:
            logger.warning(
                "You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece"
                " pip install sentencepiece"
            )
            raise
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'
        # put special tokens and [unused] tokens into the vocab
        self.fairseq_tokens_to_ids = {"[PAD]": 0, "[CLS]": 1, "[SEP]": 2, "[UNK]": 3, "[MASK]": 4}
        for i in range(10):
            tok = f"[unused{i}]"
            self.fairseq_tokens_to_ids[tok] = 5 + i
        # The first "real" token "," has position 15 in the embedding vocab and position 3 in the spm vocab
        self.fairseq_offset = 12
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
        for k in self.fairseq_tokens_to_ids.keys():
            self.unique_no_split_tokens.append(k)

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        try:
            import sentencepiece as spm
        except ImportError:
            logger.warning(
                "You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece"
                " pip install sentencepiece"
            )
            raise
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + [1]
        return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0]
        return len(token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.sp_model) + self.fairseq_offset

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str):
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings) to a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        if token_ids_1 is None:
            return token_ids_0 + [self.sep_token_id]
        sep = [self.sep_token_id]
        return token_ids_0 + sep + token_ids_1 + sep
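# Usage sketch (my addition):
#   tokenizer = XLMProphetNetTokenizer.from_pretrained("microsoft/xprophetnet-large-wiki100-cased")
#   tokenizer("Hello world")["input_ids"]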
| 703
|
"""simple docstring"""
def xnor_gate(input_1: int, input_2: int) -> int:
    """Logical XNOR: return 1 when both inputs are equal, otherwise 0."""
    return 1 if input_1 == input_2 else 0


def test_xnor_gate() -> None:
    assert xnor_gate(0, 0) == 1
    assert xnor_gate(0, 1) == 0
    assert xnor_gate(1, 0) == 0
    assert xnor_gate(1, 1) == 1
if __name__ == "__main__":
print(xnor_gate(0, 0))
print(xnor_gate(0, 1))
print(xnor_gate(1, 0))
print(xnor_gate(1, 1))
| 554
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'microsoft/trocr-base-handwritten': (
'https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json'
),
# See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class TrOCRConfig(PretrainedConfig):
    model_type = "trocr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_attention_heads": "decoder_attention_heads",
        "hidden_size": "d_model",
        "num_hidden_layers": "decoder_layers",
    }

    def __init__(
        self,
        vocab_size=50265,
        d_model=1024,
        decoder_layers=12,
        decoder_attention_heads=16,
        decoder_ffn_dim=4096,
        activation_function="gelu",
        max_position_embeddings=512,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        decoder_start_token_id=2,
        init_std=0.02,
        decoder_layerdrop=0.0,
        use_cache=True,
        scale_embedding=False,
        use_learned_position_embeddings=True,
        layernorm_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.activation_function = activation_function
        self.max_position_embeddings = max_position_embeddings
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.scale_embedding = scale_embedding
        self.use_learned_position_embeddings = use_learned_position_embeddings
        self.layernorm_embedding = layernorm_embedding

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
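# Usage sketch (my addition): the attribute_map above aliases common config
# names onto the decoder-specific ones, so e.g.
#   config = TrOCRConfig()
#   config.num_attention_heads  # -> 16, same as config.decoder_attention_heads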
| 43
|
"""simple docstring"""
import argparse
import hashlib
import io
import os
import urllib
import warnings
import torch
from torch import nn
from tqdm import tqdm
from transformers import WhisperConfig, WhisperForConditionalGeneration
_MODELS = {
"tiny.en": "https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt",
"tiny": "https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt",
"base.en": "https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt",
"base": "https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt",
"small.en": "https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt",
"small": "https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt",
"medium.en": "https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt",
"medium": "https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt",
"large": "https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt",
"large-v2": "https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt",
}
def remove_ignore_keys_(state_dict):
    ignore_keys = ["layers", "blocks"]
    for k in ignore_keys:
        state_dict.pop(k, None)
WHISPER_MAPPING = {
"blocks": "layers",
"mlp.0": "fc1",
"mlp.2": "fc2",
"mlp_ln": "final_layer_norm",
".attn.query": ".self_attn.q_proj",
".attn.key": ".self_attn.k_proj",
".attn.value": ".self_attn.v_proj",
".attn_ln": ".self_attn_layer_norm",
".attn.out": ".self_attn.out_proj",
".cross_attn.query": ".encoder_attn.q_proj",
".cross_attn.key": ".encoder_attn.k_proj",
".cross_attn.value": ".encoder_attn.v_proj",
".cross_attn_ln": ".encoder_attn_layer_norm",
".cross_attn.out": ".encoder_attn.out_proj",
"decoder.ln.": "decoder.layer_norm.",
"encoder.ln.": "encoder.layer_norm.",
"token_embedding": "embed_tokens",
"encoder.positional_embedding": "encoder.embed_positions.weight",
"decoder.positional_embedding": "decoder.embed_positions.weight",
"ln_post": "layer_norm",
}
def rename_keys(s_dict):
    keys = list(s_dict.keys())
    for key in keys:
        new_key = key
        for k, v in WHISPER_MAPPING.items():
            if k in key:
                new_key = new_key.replace(k, v)
        print(f"{key} -> {new_key}")
        s_dict[new_key] = s_dict.pop(key)
    return s_dict


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def _download(url: str, root: str = os.path.join(os.path.expanduser("~"), ".cache", "whisper")) -> bytes:
    # NOTE: the default cache directory is an assumption for this sketch; the
    # checksum is taken from the second-to-last URL segment as in the originals.
    os.makedirs(root, exist_ok=True)
    filename = os.path.basename(url)
    expected_sha256 = url.split("/")[-2]
    download_target = os.path.join(root, filename)
    if os.path.exists(download_target) and not os.path.isfile(download_target):
        raise RuntimeError(f"{download_target} exists and is not a regular file")
    if os.path.isfile(download_target):
        model_bytes = open(download_target, "rb").read()
        if hashlib.sha256(model_bytes).hexdigest() == expected_sha256:
            return model_bytes
        else:
            warnings.warn(f"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file")
    with urllib.request.urlopen(url) as source, open(download_target, "wb") as output:
        with tqdm(
            total=int(source.info().get("Content-Length")), ncols=80, unit="iB", unit_scale=True, unit_divisor=1024
        ) as loop:
            while True:
                buffer = source.read(8192)
                if not buffer:
                    break
                output.write(buffer)
                loop.update(len(buffer))
    model_bytes = open(download_target, "rb").read()
    if hashlib.sha256(model_bytes).hexdigest() != expected_sha256:
        raise RuntimeError(
            "Model has been downloaded but the SHA256 checksum does not match. Please retry loading the model."
        )
    return model_bytes
def convert_openai_whisper_to_tfms(checkpoint_path, pytorch_dump_folder_path):
    if ".pt" not in checkpoint_path:
        # A named model: download the official checkpoint and load it from memory.
        original_checkpoint = torch.load(io.BytesIO(_download(_MODELS[checkpoint_path])), map_location="cpu")
    else:
        original_checkpoint = torch.load(checkpoint_path, map_location="cpu")
    dimensions = original_checkpoint["dims"]
    state_dict = original_checkpoint["model_state_dict"]
    proj_out_weights = state_dict["decoder.token_embedding.weight"]
    remove_ignore_keys_(state_dict)
    rename_keys(state_dict)
    tie_embeds = True
    ffn_dim = state_dict["decoder.layers.0.fc1.weight"].shape[0]
    config = WhisperConfig(
        vocab_size=dimensions["n_vocab"],
        encoder_ffn_dim=ffn_dim,
        decoder_ffn_dim=ffn_dim,
        num_mel_bins=dimensions["n_mels"],
        d_model=dimensions["n_audio_state"],
        max_target_positions=dimensions["n_text_ctx"],
        encoder_layers=dimensions["n_audio_layer"],
        encoder_attention_heads=dimensions["n_audio_head"],
        decoder_layers=dimensions["n_text_layer"],
        decoder_attention_heads=dimensions["n_text_head"],
        max_source_positions=dimensions["n_audio_ctx"],
    )
    model = WhisperForConditionalGeneration(config)
    missing, unexpected = model.model.load_state_dict(state_dict, strict=False)
    if len(missing) > 0 and not set(missing) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            "Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"
            f" but all the following weights are missing {missing}"
        )
    if tie_embeds:
        model.proj_out = make_linear_from_emb(model.model.decoder.embed_tokens)
    else:
        model.proj_out.weight.data = proj_out_weights
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# # Required parameters
parser.add_argument("--checkpoint_path", type=str, help="Patht to the downloaded checkpoints")
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
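# Example invocation (script name and output path are placeholders):
#   python convert_openai_to_hf.py --checkpoint_path tiny.en --pytorch_dump_folder_path ./whisper-tiny.en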
| 363
| 0
|
"""simple docstring"""
def solution(n: int = 100) -> int:
    """Count the distinct terms of a**b for 2 <= a, b <= n (Project Euler 29)."""
    collect_powers = set()
    current_pow = 0
    n = n + 1  # maximum limit
    for a in range(2, n):
        for b in range(2, n):
            current_pow = a**b  # calculates the current power
            collect_powers.add(current_pow)  # adds the result to the set
    return len(collect_powers)
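# For n = 100 the answer is 9183 (the published Project Euler 29 result).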
if __name__ == "__main__":
print('''Number of terms ''', solution(int(str(input()).strip())))
| 701
|
"""simple docstring"""
from functools import reduce
N = (
'73167176531330624919225119674426574742355349194934'
'96983520312774506326239578318016984801869478851843'
'85861560789112949495459501737958331952853208805511'
'12540698747158523863050715693290963295227443043557'
'66896648950445244523161731856403098711121722383113'
'62229893423380308135336276614282806444486645238749'
'30358907296290491560440772390713810515859307960866'
'70172427121883998797908792274921901699720888093776'
'65727333001053367881220235421809751254540594752243'
'52584907711670556013604839586446706324415722155397'
'53697817977846174064955149290862569321978468622482'
'83972241375657056057490261407972968652414535100474'
'82166370484403199890008895243450658541227588666881'
'16427171479924442928230863465674813919123162824586'
'17866458359124566529476545682848912883142607690042'
'24219022671055626321111109370544217506941658960408'
'07198403850962455444362981230987879927244284909188'
'84580156166097919133875499200524063689912560717606'
'05886116467109405077541002256983155200055935729725'
'71636269561882670428252483600823257530420752963450'
)
def solution(n: str = N) -> int:
    return max(
        # mypy cannot properly interpret reduce
        int(reduce(lambda x, y: str(int(x) * int(y)), n[i : i + 13]))
        for i in range(len(n) - 12)
    )
if __name__ == "__main__":
print(f"""{solution() = }""")
| 2
| 0
|
import logging
import random
import ray
from transformers import RagConfig, RagRetriever, RagTokenizer
from transformers.models.rag.retrieval_rag import CustomHFIndex
logger = logging.getLogger(__name__)
class RayRetriever:
    def __init__(self):
        self.initialized = False

    def create_rag_retriever(self, config, question_encoder_tokenizer, generator_tokenizer, index):
        if not self.initialized:
            self.retriever = RagRetriever(
                config,
                question_encoder_tokenizer=question_encoder_tokenizer,
                generator_tokenizer=generator_tokenizer,
                index=index,
                init_retrieval=False,
            )
            self.initialized = True

    def init_retrieval(self):
        self.retriever.index.init_index()

    def retrieve(self, question_hidden_states, n_docs):
        doc_ids, retrieved_doc_embeds = self.retriever._main_retrieve(question_hidden_states, n_docs)
        return doc_ids, retrieved_doc_embeds


class RagRayDistributedRetriever(RagRetriever):
    def __init__(self, config, question_encoder_tokenizer, generator_tokenizer, retrieval_workers, index=None):
        if index is not None and index.is_initialized() and len(retrieval_workers) > 0:
            raise ValueError(
                'When using Ray for distributed fine-tuning, '
                'you\'ll need to provide the paths instead, '
                'as the dataset and the index are loaded '
                'separately. More info in examples/rag/use_own_knowledge_dataset.py ' )
        super().__init__(
            config,
            question_encoder_tokenizer=question_encoder_tokenizer,
            generator_tokenizer=generator_tokenizer,
            index=index,
            init_retrieval=False,
        )
        self.retrieval_workers = retrieval_workers
        if len(self.retrieval_workers) > 0:
            ray.get(
                [
                    worker.create_rag_retriever.remote(config, question_encoder_tokenizer, generator_tokenizer, index)
                    for worker in self.retrieval_workers
                ]
            )

    def init_retrieval(self):
        logger.info('initializing retrieval')
        if len(self.retrieval_workers) > 0:
            ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers])
        else:
            # Non-distributed training. Load index into this same process.
            self.index.init_index()

    def retrieve(self, question_hidden_states, n_docs):
        if len(self.retrieval_workers) > 0:
            # Select a random retrieval actor.
            random_worker = self.retrieval_workers[random.randint(0, len(self.retrieval_workers) - 1)]
            doc_ids, retrieved_doc_embeds = ray.get(random_worker.retrieve.remote(question_hidden_states, n_docs))
        else:
            doc_ids, retrieved_doc_embeds = self._main_retrieve(question_hidden_states, n_docs)
        return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids)

    @classmethod
    def get_tokenizers(cls, retriever_name_or_path, indexed_dataset=None, **kwargs):
        return super(RagRayDistributedRetriever, cls).get_tokenizers(retriever_name_or_path, indexed_dataset, **kwargs)

    @classmethod
    def from_pretrained(cls, retriever_name_or_path, actor_handles, indexed_dataset=None, **kwargs):
        config = kwargs.pop('config', None) or RagConfig.from_pretrained(retriever_name_or_path, **kwargs)
        rag_tokenizer = RagTokenizer.from_pretrained(retriever_name_or_path, config=config)
        question_encoder_tokenizer = rag_tokenizer.question_encoder
        generator_tokenizer = rag_tokenizer.generator
        if indexed_dataset is not None:
            config.index_name = 'custom'
            index = CustomHFIndex(config.retrieval_vector_size, indexed_dataset)
        else:
            index = cls._build_index(config)
        return cls(
            config,
            question_encoder_tokenizer=question_encoder_tokenizer,
            generator_tokenizer=generator_tokenizer,
            retrieval_workers=actor_handles,
            index=index,
        )
| 242
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"YituTech/conv-bert-base": "https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json",
"YituTech/conv-bert-medium-small": (
"https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json"
),
"YituTech/conv-bert-small": "https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json",
# See all ConvBERT models at https://huggingface.co/models?filter=convbert
}
class ConvBertConfig(PretrainedConfig):
    model_type = 'convbert'

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act='gelu',
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        embedding_size=768,
        head_ratio=2,
        conv_kernel_size=9,
        num_groups=1,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.embedding_size = embedding_size
        self.head_ratio = head_ratio
        self.conv_kernel_size = conv_kernel_size
        self.num_groups = num_groups
        self.classifier_dropout = classifier_dropout


class ConvBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == 'multiple-choice':
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        return OrderedDict(
            [
                ('input_ids', dynamic_axis),
                ('attention_mask', dynamic_axis),
                ('token_type_ids', dynamic_axis),
            ]
        )
| 242
| 1
|
import html
from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from ...utils import is_bs4_available, logging, requires_backends

if is_bs4_available():
    import bs4
    from bs4 import BeautifulSoup

logger = logging.get_logger(__name__)
class MarkupLMFeatureExtractor(FeatureExtractionMixin):
    """Extract node strings and their xpaths from raw HTML, for MarkupLM."""

    def __init__(self, **kwargs):
        requires_backends(self, ["bs4"])
        super().__init__(**kwargs)

    def xpath_soup(self, element):
        xpath_tags = []
        xpath_subscripts = []
        child = element if element.name else element.parent
        for parent in child.parents:  # type: bs4.element.Tag
            siblings = parent.find_all(child.name, recursive=False)
            xpath_tags.append(child.name)
            xpath_subscripts.append(
                0 if 1 == len(siblings) else next(i for i, s in enumerate(siblings, 1) if s is child)
            )
            child = parent
        xpath_tags.reverse()
        xpath_subscripts.reverse()
        return xpath_tags, xpath_subscripts

    def get_three_from_single(self, html_string):
        html_code = BeautifulSoup(html_string, "html.parser")
        all_doc_strings = []
        string2xtag_seq = []
        string2xsubs_seq = []
        for element in html_code.descendants:
            if type(element) == bs4.element.NavigableString:
                if type(element.parent) != bs4.element.Tag:
                    continue
                text_in_this_tag = html.unescape(element).strip()
                if not text_in_this_tag:
                    continue
                all_doc_strings.append(text_in_this_tag)
                xpath_tags, xpath_subscripts = self.xpath_soup(element)
                string2xtag_seq.append(xpath_tags)
                string2xsubs_seq.append(xpath_subscripts)
        if len(all_doc_strings) != len(string2xtag_seq):
            raise ValueError("Number of doc strings and xtags does not correspond")
        if len(all_doc_strings) != len(string2xsubs_seq):
            raise ValueError("Number of doc strings and xsubs does not correspond")
        return all_doc_strings, string2xtag_seq, string2xsubs_seq

    def construct_xpath(self, xpath_tags, xpath_subscripts):
        xpath = ""
        for tagname, subs in zip(xpath_tags, xpath_subscripts):
            xpath += f"""/{tagname}"""
            if subs != 0:
                xpath += f"""[{subs}]"""
        return xpath

    def __call__(self, html_strings) -> BatchFeature:
        valid_strings = False
        # Check that strings has a valid type
        if isinstance(html_strings, str):
            valid_strings = True
        elif isinstance(html_strings, (list, tuple)):
            if len(html_strings) == 0 or isinstance(html_strings[0], str):
                valid_strings = True
        if not valid_strings:
            raise ValueError(
                "HTML strings must of type `str`, `List[str]` (batch of examples), "
                f"""but is of type {type(html_strings)}.""")
        is_batched = bool(isinstance(html_strings, (list, tuple)) and (isinstance(html_strings[0], str)))
        if not is_batched:
            html_strings = [html_strings]
        # Get nodes + xpaths
        nodes = []
        xpaths = []
        for html_string in html_strings:
            all_doc_strings, string2xtag_seq, string2xsubs_seq = self.get_three_from_single(html_string)
            nodes.append(all_doc_strings)
            xpath_strings = []
            for node, tag_list, sub_list in zip(all_doc_strings, string2xtag_seq, string2xsubs_seq):
                xpath_string = self.construct_xpath(tag_list, sub_list)
                xpath_strings.append(xpath_string)
            xpaths.append(xpath_strings)
        # return as Dict
        data = {"nodes": nodes, "xpaths": xpaths}
        encoded_inputs = BatchFeature(data=data, tensor_type=None)
        return encoded_inputs
| 484
|
INSTALL_CONTENT = '\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'
notebook_first_cells = [{'type': 'code', 'content': INSTALL_CONTENT}]
black_avoid_patterns = {
    '{processor_class}': 'FakeProcessorClass',
    '{model_class}': 'FakeModelClass',
    '{object_class}': 'FakeObjectClass',
}
| 484
| 1
|
import os
def solution(filename: str = "matrix.txt") -> int:
    """Find the minimal path sum from the top left to the bottom right of the
    matrix in `filename`, moving only right and down (Project Euler 81)."""
    with open(os.path.join(os.path.dirname(__file__), filename)) as in_file:
        data = in_file.read()
    grid = [[int(cell) for cell in row.split(',')] for row in data.strip().splitlines()]
    n = len(grid[0])
    dp = [[0 for i in range(n)] for j in range(n)]
    dp[0][0] = grid[0][0]
    for i in range(1, n):
        dp[0][i] = grid[0][i] + dp[0][i - 1]
    for i in range(1, n):
        dp[i][0] = grid[i][0] + dp[i - 1][0]
    for i in range(1, n):
        for j in range(1, n):
            dp[i][j] = grid[i][j] + min(dp[i - 1][j], dp[i][j - 1])
    return dp[-1][-1]
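# With the official 80x80 matrix.txt, the published answer is 427337.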
if __name__ == "__main__":
print(F'''{solution() = }''')
| 362
|
import argparse
import struct
import unittest
class SHA256:
    def __init__(self, data: bytes):
        self.data = data
# Initialize hash values
        self.hashes = [
0x6A09E667,
0xBB67AE85,
0x3C6EF372,
0xA54FF53A,
0x510E527F,
0x9B05688C,
0x1F83D9AB,
0x5BE0CD19,
]
# Initialize round constants
        self.round_constants = [
0x428A2F98,
0x71374491,
0xB5C0FBCF,
0xE9B5DBA5,
0x3956C25B,
0x59F111F1,
0x923F82A4,
0xAB1C5ED5,
0xD807AA98,
0x12835B01,
0x243185BE,
0x550C7DC3,
0x72BE5D74,
0x80DEB1FE,
0x9BDC06A7,
0xC19BF174,
0xE49B69C1,
0xEFBE4786,
0x0FC19DC6,
0x240CA1CC,
0x2DE92C6F,
0x4A7484AA,
0x5CB0A9DC,
0x76F988DA,
0x983E5152,
0xA831C66D,
0xB00327C8,
0xBF597FC7,
0xC6E00BF3,
0xD5A79147,
0x06CA6351,
0x14292967,
0x27B70A85,
0x2E1B2138,
0x4D2C6DFC,
0x53380D13,
0x650A7354,
0x766A0ABB,
0x81C2C92E,
0x92722C85,
0xA2BFE8A1,
0xA81A664B,
0xC24B8B70,
0xC76C51A3,
0xD192E819,
0xD6990624,
0xF40E3585,
0x106AA070,
0x19A4C116,
0x1E376C08,
0x2748774C,
0x34B0BCB5,
0x391C0CB3,
0x4ED8AA4A,
0x5B9CCA4F,
0x682E6FF3,
0x748F82EE,
0x78A5636F,
0x84C87814,
0x8CC70208,
0x90BEFFFA,
0xA4506CEB,
0xBEF9A3F7,
0xC67178F2,
]
        self.preprocessed_data = self.preprocessing(self.data)
        self.final_hash()
    @staticmethod
    def preprocessing(data: bytes) -> bytes:
        # Pad to a multiple of 64 bytes: 0x80 marker, zeros, then the bit length.
        padding = b'\x80' + (b'\x00' * (63 - (len(data) + 8) % 64))
        big_endian_integer = struct.pack('>Q', (len(data) * 8))
        return data + padding + big_endian_integer
    def final_hash(self) -> None:
        # Convert into blocks of 64 bytes
        self.blocks = [
            self.preprocessed_data[x : x + 64]
            for x in range(0, len(self.preprocessed_data), 64)
        ]
        for block in self.blocks:
            # Convert the given block into a list of 4 byte integers
            words = list(struct.unpack('>16L', block))
            # add 48 0-ed integers
            words += [0] * 48
            a, b, c, d, e, f, g, h = self.hashes
            for index in range(0, 64):
                if index > 15:
                    # modify the zero-ed indexes at the end of the array
                    s0 = (
                        self.ror(words[index - 15], 7)
                        ^ self.ror(words[index - 15], 18)
                        ^ (words[index - 15] >> 3)
                    )
                    s1 = (
                        self.ror(words[index - 2], 17)
                        ^ self.ror(words[index - 2], 19)
                        ^ (words[index - 2] >> 10)
                    )
                    words[index] = (
                        words[index - 16] + s0 + words[index - 7] + s1
                    ) % 0x100000000
                # Compression
                s1 = self.ror(e, 6) ^ self.ror(e, 11) ^ self.ror(e, 25)
                ch = (e & f) ^ ((~e & 0xFFFFFFFF) & g)
                temp1 = (
                    h + s1 + ch + self.round_constants[index] + words[index]
                ) % 0x100000000
                s0 = self.ror(a, 2) ^ self.ror(a, 13) ^ self.ror(a, 22)
                maj = (a & b) ^ (a & c) ^ (b & c)
                temp2 = (s0 + maj) % 0x100000000
                a, b, c, d, e, f, g, h = (
                    g,
                    f,
                    e,
                    ((d + temp1) % 0x100000000),
                    c,
                    b,
                    a,
                    ((temp1 + temp2) % 0x100000000),
                )
            mutated_hash_values = [a, b, c, d, e, f, g, h]
            # Modify final values
            self.hashes = [
                ((element + mutated_hash_values[index]) % 0x100000000)
                for index, element in enumerate(self.hashes)
            ]
        self.hash = ''.join([hex(value)[2:].zfill(8) for value in self.hashes])
    def ror(self, value: int, rotations: int) -> int:
        return 0xFFFFFFFF & (value << (32 - rotations)) | (value >> rotations)
class SHA256HashTest(unittest.TestCase):
    def test_match_hashes(self):
        import hashlib

        data = bytes('Test String', 'utf-8')
        self.assertEqual(SHA256(data).hash, hashlib.sha256(data).hexdigest())
def main() -> None:
    """Hash either a string argument or the contents of a file."""
    import doctest

    doctest.testmod()
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '-s', '--string', dest='input_string', default='Hello World!! Welcome to Cryptography', help='Hash the string', )
    parser.add_argument(
        '-f', '--file', dest='input_file', help='Hash contents of a file' )
    args = parser.parse_args()
    input_string = args.input_string
    # hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, 'rb') as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string, 'utf-8')
    print(SHA256(hash_input).hash)
if __name__ == "__main__":
main()
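# Quick sanity check (my addition; the script name is a placeholder):
#   python sha256.py -s "abc"
#   # -> ba7816bf8f01cfea414140de5dae2223b00361a396177a9cb410ff61f20015ad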
| 362
| 1
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""facebook/deit-base-distilled-patch16-224""": (
"""https://huggingface.co/facebook/deit-base-patch16-224/resolve/main/config.json"""
),
# See all DeiT models at https://huggingface.co/models?filter=deit
}
class DeiTConfig(PretrainedConfig):
    model_type = """deit"""

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        encoder_stride=16,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.encoder_stride = encoder_stride


class DeiTOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("""1.11""")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
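# Usage sketch (my addition):
#   config = DeiTConfig(image_size=384)
#   config.num_attention_heads  # -> 12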
| 504
|
import sys
N = (
"""73167176531330624919225119674426574742355349194934"""
"""96983520312774506326239578318016984801869478851843"""
"""85861560789112949495459501737958331952853208805511"""
"""12540698747158523863050715693290963295227443043557"""
"""66896648950445244523161731856403098711121722383113"""
"""62229893423380308135336276614282806444486645238749"""
"""30358907296290491560440772390713810515859307960866"""
"""70172427121883998797908792274921901699720888093776"""
"""65727333001053367881220235421809751254540594752243"""
"""52584907711670556013604839586446706324415722155397"""
"""53697817977846174064955149290862569321978468622482"""
"""83972241375657056057490261407972968652414535100474"""
"""82166370484403199890008895243450658541227588666881"""
"""16427171479924442928230863465674813919123162824586"""
"""17866458359124566529476545682848912883142607690042"""
"""24219022671055626321111109370544217506941658960408"""
"""07198403850962455444362981230987879927244284909188"""
"""84580156166097919133875499200524063689912560717606"""
"""05886116467109405077541002256983155200055935729725"""
"""71636269561882670428252483600823257530420752963450"""
)
def solution(n: str = N) -> int:
    """Return the greatest product of thirteen adjacent digits in `n`."""
    largest_product = -sys.maxsize - 1
    for i in range(len(n) - 12):
        product = 1
        for j in range(13):
            product *= int(n[i + j])
        if product > largest_product:
            largest_product = product
    return largest_product
if __name__ == "__main__":
print(F"{solution() = }")
| 504
| 1
|
"""simple docstring"""
import shutil
import tempfile
import unittest
from transformers import (
SPIECE_UNDERLINE,
AddedToken,
BatchEncoding,
NllbTokenizer,
NllbTokenizerFast,
is_torch_available,
)
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")

if is_torch_available():
    from transformers.models.m2m_100.modeling_m2m_100 import shift_tokens_right

EN_CODE = 256047
RO_CODE = 256145
@require_sentencepiece
@require_tokenizers
class NllbTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = NllbTokenizer
    rust_tokenizer_class = NllbTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    from_pretrained_kwargs = {}
    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = NllbTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_full_tokenizer(self):
        tokenizer = NllbTokenizer(SAMPLE_VOCAB, keep_accents=True)
snake_case_ : Optional[Any] = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(_lowercase , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_lowercase ) , [value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]] , )
snake_case_ : Optional[int] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
_lowercase , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
snake_case_ : Union[str, Any] = tokenizer.convert_tokens_to_ids(_lowercase )
self.assertListEqual(
_lowercase , [
value + tokenizer.fairseq_offset
for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4]
] , )
snake_case_ : List[Any] = tokenizer.convert_ids_to_tokens(_lowercase )
self.assertListEqual(
_lowercase , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
def UpperCAmelCase__ ( self ) -> Any:
'''simple docstring'''
snake_case_ : Tuple = (self.rust_tokenizer_class, """hf-internal-testing/tiny-random-nllb""", {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
snake_case_ : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(_lowercase , **_lowercase )
snake_case_ : Optional[int] = self.tokenizer_class.from_pretrained(_lowercase , **_lowercase )
snake_case_ : Any = tempfile.mkdtemp()
snake_case_ : Optional[int] = tokenizer_r.save_pretrained(_lowercase )
snake_case_ : Optional[int] = tokenizer_p.save_pretrained(_lowercase )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
snake_case_ : Dict = tuple(f for f in tokenizer_r_files if """tokenizer.json""" not in f )
self.assertSequenceEqual(_lowercase , _lowercase )
# Checks everything loads correctly in the same way
snake_case_ : Optional[int] = tokenizer_r.from_pretrained(_lowercase )
snake_case_ : str = tokenizer_p.from_pretrained(_lowercase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(_lowercase , _lowercase ) )
shutil.rmtree(_lowercase )
# Save tokenizer rust, legacy_format=True
snake_case_ : Optional[Any] = tempfile.mkdtemp()
snake_case_ : Optional[Any] = tokenizer_r.save_pretrained(_lowercase , legacy_format=_lowercase )
snake_case_ : Optional[int] = tokenizer_p.save_pretrained(_lowercase )
# Checks it save with the same files
self.assertSequenceEqual(_lowercase , _lowercase )
# Checks everything loads correctly in the same way
snake_case_ : Union[str, Any] = tokenizer_r.from_pretrained(_lowercase )
snake_case_ : str = tokenizer_p.from_pretrained(_lowercase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(_lowercase , _lowercase ) )
shutil.rmtree(_lowercase )
# Save tokenizer rust, legacy_format=False
snake_case_ : str = tempfile.mkdtemp()
snake_case_ : Any = tokenizer_r.save_pretrained(_lowercase , legacy_format=_lowercase )
snake_case_ : Optional[int] = tokenizer_p.save_pretrained(_lowercase )
# Checks it saved the tokenizer.json file
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
snake_case_ : List[str] = tokenizer_r.from_pretrained(_lowercase )
snake_case_ : Optional[int] = tokenizer_p.from_pretrained(_lowercase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(_lowercase , _lowercase ) )
shutil.rmtree(_lowercase )
@require_torch
def UpperCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
if not self.test_seqaseq:
return
snake_case_ : Union[str, Any] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}' ):
# Longer text that will definitely require truncation.
snake_case_ : Optional[Any] = [
""" UN Chief Says There Is No Military Solution in Syria""",
""" Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for"""
""" Syria is that 'there is no military solution' to the nearly five-year conflict and more weapons"""
""" will only worsen the violence and misery for millions of people.""",
]
snake_case_ : Optional[int] = [
"""Şeful ONU declară că nu există o soluţie militară în Siria""",
"""Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al"""
""" Rusiei pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi"""
""" că noi arme nu vor face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.""",
]
try:
snake_case_ : Any = tokenizer.prepare_seqaseq_batch(
src_texts=_lowercase , tgt_texts=_lowercase , max_length=3 , max_target_length=1_0 , return_tensors="""pt""" , src_lang="""eng_Latn""" , tgt_lang="""ron_Latn""" , )
except NotImplementedError:
return
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.labels.shape[1] , 1_0 )
# max_target_length will default to max_length if not specified
snake_case_ : str = tokenizer.prepare_seqaseq_batch(
_lowercase , tgt_texts=_lowercase , max_length=3 , return_tensors="""pt""" )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.labels.shape[1] , 3 )
snake_case_ : Optional[int] = tokenizer.prepare_seqaseq_batch(
src_texts=_lowercase , max_length=3 , max_target_length=1_0 , return_tensors="""pt""" )
self.assertEqual(batch_encoder_only.input_ids.shape[1] , 3 )
self.assertEqual(batch_encoder_only.attention_mask.shape[1] , 3 )
self.assertNotIn("""decoder_input_ids""" , _lowercase )
@unittest.skip("""Unfortunately way too slow to build a BPE with SentencePiece.""" )
def UpperCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
pass
def UpperCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
snake_case_ : List[str] = [AddedToken("""<special>""" , lstrip=_lowercase )]
snake_case_ : List[Any] = self.rust_tokenizer_class.from_pretrained(
_lowercase , additional_special_tokens=_lowercase , **_lowercase )
snake_case_ : Dict = tokenizer_r.encode("""Hey this is a <special> token""" )
snake_case_ : int = tokenizer_r.encode("""<special>""" , add_special_tokens=_lowercase )[0]
self.assertTrue(special_token_id in r_output )
if self.test_slow_tokenizer:
snake_case_ : Optional[Any] = self.rust_tokenizer_class.from_pretrained(
_lowercase , additional_special_tokens=_lowercase , **_lowercase , )
snake_case_ : List[Any] = self.tokenizer_class.from_pretrained(
_lowercase , additional_special_tokens=_lowercase , **_lowercase )
snake_case_ : Tuple = tokenizer_p.encode("""Hey this is a <special> token""" )
snake_case_ : int = tokenizer_cr.encode("""Hey this is a <special> token""" )
self.assertEqual(_lowercase , _lowercase )
self.assertEqual(_lowercase , _lowercase )
self.assertTrue(special_token_id in p_output )
self.assertTrue(special_token_id in cr_output )
@require_torch
@require_sentencepiece
@require_tokenizers
class NllbDistilledIntegrationTest(unittest.TestCase):
    checkpoint_name = "facebook/nllb-200-distilled-600M"
    src_text = [
''' UN Chief Says There Is No Military Solution in Syria''',
''' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.''',
]
    tgt_text = [
'''Şeful ONU declară că nu există o soluţie militară în Siria''',
'''Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei'''
''' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'''
''' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.''',
]
    expected_src_tokens = [
256_047,
16_297,
134_408,
8_165,
248_066,
14_734,
950,
1_135,
105_721,
3_573,
83,
27_352,
108,
49_486,
2,
]
    @classmethod
    def setUpClass(cls):
        cls.tokenizer: NllbTokenizer = NllbTokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="eng_Latn", tgt_lang="ron_Latn"
        )
        cls.pad_token_id = 1
        return cls
def UpperCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ace_Arab"""] , 2_5_6_0_0_1 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ace_Latn"""] , 2_5_6_0_0_2 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""fra_Latn"""] , 2_5_6_0_5_7 )
def UpperCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : int = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , _lowercase )
def UpperCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
self.assertIn(_lowercase , self.tokenizer.all_special_ids )
# fmt: off
snake_case_ : List[Any] = [RO_CODE, 4_2_5_4, 9_8_0_6_8, 1_1_2_9_2_3, 3_9_0_7_2, 3_9_0_9, 7_1_3, 1_0_2_7_6_7, 2_6, 1_7_3_1_4, 3_5_6_4_2, 1_4_6_8_3, 3_3_1_1_8, 2_0_2_2, 6_6_9_8_7, 2, 2_5_6_0_4_7]
# fmt: on
snake_case_ : int = self.tokenizer.decode(_lowercase , skip_special_tokens=_lowercase )
snake_case_ : List[str] = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=_lowercase )
self.assertEqual(_lowercase , _lowercase )
self.assertNotIn(self.tokenizer.eos_token , _lowercase )
def UpperCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
snake_case_ : List[str] = ["""this is gunna be a long sentence """ * 2_0]
assert isinstance(src_text[0] , _lowercase )
snake_case_ : Optional[int] = 1_0
snake_case_ : Optional[int] = self.tokenizer(_lowercase , max_length=_lowercase , truncation=_lowercase ).input_ids[0]
self.assertEqual(ids[-1] , 2 )
self.assertEqual(ids[0] , _lowercase )
self.assertEqual(len(_lowercase ) , _lowercase )
def UpperCAmelCase__ ( self ) -> str:
'''simple docstring'''
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["""<mask>""", """ar_AR"""] ) , [2_5_6_2_0_3, 3] )
def UpperCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
snake_case_ : Dict = tempfile.mkdtemp()
snake_case_ : List[Any] = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(_lowercase )
snake_case_ : Union[str, Any] = NllbTokenizer.from_pretrained(_lowercase )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , _lowercase )
@require_torch
def UpperCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
snake_case_ : str = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=_lowercase , truncation=_lowercase , max_length=len(self.expected_src_tokens ) , return_tensors="""pt""" , )
snake_case_ : Optional[Any] = shift_tokens_right(
batch["""labels"""] , self.tokenizer.pad_token_id , self.tokenizer.lang_code_to_id["""ron_Latn"""] )
self.assertIsInstance(_lowercase , _lowercase )
self.assertEqual((2, 1_5) , batch.input_ids.shape )
self.assertEqual((2, 1_5) , batch.attention_mask.shape )
snake_case_ : List[str] = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , _lowercase )
self.assertEqual(_lowercase , batch.decoder_input_ids[0, 0] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
snake_case_ : Tuple = self.tokenizer(self.src_text , padding=_lowercase , truncation=_lowercase , max_length=3 , return_tensors="""pt""" )
snake_case_ : Optional[Any] = self.tokenizer(
text_target=self.tgt_text , padding=_lowercase , truncation=_lowercase , max_length=1_0 , return_tensors="""pt""" )
snake_case_ : str = targets["""input_ids"""]
snake_case_ : Optional[int] = shift_tokens_right(
_lowercase , self.tokenizer.pad_token_id , decoder_start_token_id=self.tokenizer.lang_code_to_id[self.tokenizer.tgt_lang] , )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 1_0 )
@require_torch
def UpperCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
snake_case_ : List[str] = self.tokenizer._build_translation_inputs(
"""A test""" , return_tensors="""pt""" , src_lang="""eng_Latn""" , tgt_lang="""fra_Latn""" )
self.assertEqual(
nested_simplify(_lowercase ) , {
# A, test, EOS, en_XX
"""input_ids""": [[2_5_6_0_4_7, 7_0, 7_3_5_6, 2]],
"""attention_mask""": [[1, 1, 1, 1]],
# ar_AR
"""forced_bos_token_id""": 2_5_6_0_5_7,
} , )
@require_torch
def UpperCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
snake_case_ : List[Any] = True
snake_case_ : Optional[int] = self.tokenizer(
"""UN Chief says there is no military solution in Syria""" , src_lang="""eng_Latn""" , tgt_lang="""fra_Latn""" )
self.assertEqual(
inputs.input_ids , [1_6_2_9_7, 1_3_4_4_0_8, 2_5_6_5_3, 6_3_7_0, 2_4_8, 2_5_4, 1_0_3_9_2_9, 9_4_9_9_5, 1_0_8, 4_9_4_8_6, 2, 2_5_6_0_4_7] )
snake_case_ : str = False
snake_case_ : List[Any] = self.tokenizer(
"""UN Chief says there is no military solution in Syria""" , src_lang="""eng_Latn""" , tgt_lang="""fra_Latn""" )
self.assertEqual(
inputs.input_ids , [2_5_6_0_4_7, 1_6_2_9_7, 1_3_4_4_0_8, 2_5_6_5_3, 6_3_7_0, 2_4_8, 2_5_4, 1_0_3_9_2_9, 9_4_9_9_5, 1_0_8, 4_9_4_8_6, 2] )
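
# Usage sketch (illustrative; downloads the checkpoint from the Hub): preparing
# English -> Romanian inputs with the same checkpoint and language codes the
# tests above exercise. With the default (non-legacy) behaviour the source
# language code is prepended and </s> appended.
if __name__ == "__main__":
    from transformers import AutoTokenizer

    nllb_tokenizer = AutoTokenizer.from_pretrained(
        "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="ron_Latn"
    )
    encoded = nllb_tokenizer("UN Chief says there is no military solution in Syria", return_tensors="pt")
    print(encoded.input_ids[0, 0].item())  # 256047, the eng_Latn language code
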
import unittest
import numpy as np
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class DPTImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
        }


@require_torch
@require_vision
class DPTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DPTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DPTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
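
# Usage sketch (illustrative; the 384x384 size is an assumption, not read from
# a checkpoint): the processor resizes to the configured size and returns a
# (batch, channels, height, width) tensor.
if __name__ == "__main__":
    processor = DPTImageProcessor(size={"height": 384, "width": 384})
    dummy = Image.new("RGB", (640, 480))  # stand-in for a real photo
    print(processor(dummy, return_tensors="pt").pixel_values.shape)  # [1, 3, 384, 384]
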
"""simple docstring"""
import qiskit
def lowercase (_lowerCAmelCase = 2 ):
__lowerCAmelCase = qubits
# Using Aer's simulator
__lowerCAmelCase = qiskit.Aer.get_backend("""aer_simulator""" )
# Creating a Quantum Circuit acting on the q register
__lowerCAmelCase = qiskit.QuantumCircuit(_lowerCAmelCase , _lowerCAmelCase )
# Adding a H gate on qubit 0 (now q0 in superposition)
circuit.h(0 )
for i in range(1 , _lowerCAmelCase ):
# Adding CX (CNOT) gate
circuit.cx(i - 1 , _lowerCAmelCase )
# Mapping the quantum measurement to the classical bits
circuit.measure(list(range(_lowerCAmelCase ) ) , list(range(_lowerCAmelCase ) ) )
# Now measuring any one qubit would affect other qubits to collapse
# their super position and have same state as the measured one.
# Executing the circuit on the simulator
__lowerCAmelCase = qiskit.execute(_lowerCAmelCase , _lowerCAmelCase , shots=1000 )
return job.result().get_counts(_lowerCAmelCase )
if __name__ == "__main__":
print(F"Total count for various states are: {quantum_entanglement(3)}")
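
# Interpretation sketch (illustrative): a GHZ state collapses only to
# all-zeros or all-ones, so for three qubits the counts should look roughly
# like {'000': ~500, '111': ~500} over 1000 shots, up to sampling noise.
if __name__ == "__main__":
    counts = quantum_entanglement(3)
    assert set(counts) <= {"000", "111"}  # only the two GHZ outcomes appear
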
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ = {
'''google/pegasus-large''': '''https://huggingface.co/google/pegasus-large/resolve/main/config.json''',
# See all PEGASUS models at https://huggingface.co/models?filter=pegasus
}
class lowerCAmelCase_ ( A__ ):
'''simple docstring'''
_snake_case = '''pegasus'''
_snake_case = ['''past_key_values''']
_snake_case = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}
def __init__( self , snake_case_=50_265 , snake_case_=1_024 , snake_case_=12 , snake_case_=4_096 , snake_case_=16 , snake_case_=12 , snake_case_=4_096 , snake_case_=16 , snake_case_=0.0 , snake_case_=0.0 , snake_case_=True , snake_case_=True , snake_case_="gelu" , snake_case_=1_024 , snake_case_=0.1 , snake_case_=0.0 , snake_case_=0.0 , snake_case_=0.02 , snake_case_=0 , snake_case_=False , snake_case_=0 , snake_case_=1 , snake_case_=1 , **snake_case_ , ) -> List[str]:
__lowerCAmelCase = vocab_size
__lowerCAmelCase = max_position_embeddings
__lowerCAmelCase = d_model
__lowerCAmelCase = encoder_ffn_dim
__lowerCAmelCase = encoder_layers
__lowerCAmelCase = encoder_attention_heads
__lowerCAmelCase = decoder_ffn_dim
__lowerCAmelCase = decoder_layers
__lowerCAmelCase = decoder_attention_heads
__lowerCAmelCase = dropout
__lowerCAmelCase = attention_dropout
__lowerCAmelCase = activation_dropout
__lowerCAmelCase = activation_function
__lowerCAmelCase = init_std
__lowerCAmelCase = encoder_layerdrop
__lowerCAmelCase = decoder_layerdrop
__lowerCAmelCase = use_cache
__lowerCAmelCase = encoder_layers
__lowerCAmelCase = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
pad_token_id=snake_case_ , eos_token_id=snake_case_ , is_encoder_decoder=snake_case_ , decoder_start_token_id=snake_case_ , forced_eos_token_id=snake_case_ , **snake_case_ , )
@property
def A__ ( self ) -> int:
return self.encoder_attention_heads
@property
def A__ ( self ) -> int:
return self.d_model
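
# Usage sketch (illustrative): attribute_map above routes the generic names to
# the PEGASUS-specific ones, so the two asserts below hold by construction.
if __name__ == "__main__":
    pegasus_config = PegasusConfig(d_model=512, encoder_attention_heads=8)
    assert pegasus_config.hidden_size == 512        # resolved via d_model
    assert pegasus_config.num_attention_heads == 8  # resolved via encoder_attention_heads
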
import unittest

import numpy as np


def schur_complement(
    mat_a: np.ndarray,
    mat_b: np.ndarray,
    mat_c: np.ndarray,
    pseudo_inv=None,
) -> np.ndarray:
    """Schur complement of the block matrix [[A, B], [B^T, C]] with respect to A."""
    shape_a = np.shape(mat_a)
    shape_b = np.shape(mat_b)
    shape_c = np.shape(mat_c)

    if shape_a[0] != shape_b[0]:
        msg = (
            "Expected the same number of rows for A and B. "
            f"Instead found A of size {shape_a} and B of size {shape_b}"
        )
        raise ValueError(msg)

    if shape_b[1] != shape_c[1]:
        msg = (
            "Expected the same number of columns for B and C. "
            f"Instead found B of size {shape_b} and C of size {shape_c}"
        )
        raise ValueError(msg)

    a_inv = pseudo_inv
    if a_inv is None:
        try:
            a_inv = np.linalg.inv(mat_a)
        except np.linalg.LinAlgError:
            raise ValueError("Input matrix A is not invertible. Cannot compute Schur complement.")

    return mat_c - mat_b.T @ a_inv @ mat_b


class TestSchurComplement(unittest.TestCase):
    def test_schur_complement(self) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1], [6, 3]])
        s = schur_complement(a, b, c)

        input_matrix = np.block([[a, b], [b.T, c]])
        det_x = np.linalg.det(input_matrix)
        det_a = np.linalg.det(a)
        det_s = np.linalg.det(s)

        # det [[A, B], [B^T, C]] = det(A) * det(S)
        self.assertAlmostEqual(det_x, det_a * det_s)

    def test_improper_a_b_dimensions(self) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1], [6, 3]])
        with self.assertRaises(ValueError):
            schur_complement(b, a, c)  # mismatched block shapes must raise

    def test_improper_b_c_dimensions(self) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1, 3], [6, 3, 5]])
        with self.assertRaises(ValueError):
            schur_complement(a, b, c)
if __name__ == "__main__":
import doctest
doctest.testmod()
unittest.main()
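
# Worked check (illustrative sketch): the determinant identity exercised by the
# test above also holds when A's (pseudo-)inverse is supplied explicitly via
# the pseudo_inv argument.
def _schur_pinv_example() -> None:
    a = np.array([[2.0, 0.0], [0.0, 2.0]])
    b = np.array([[1.0], [1.0]])
    c = np.array([[3.0]])
    s = schur_complement(a, b, c, pseudo_inv=np.linalg.pinv(a))
    print(s)  # [[2.]] because 3 - [1 1] @ inv(2I) @ [1 1]^T = 3 - 1 = 2
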
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import (
BaseOutput,
OptionalDependencyNotAvailable,
is_flax_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_onnx_available,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
@dataclass
class StableDiffusionPipelineOutput(BaseOutput):
    images: Union[List[PIL.Image.Image], np.ndarray]
    nsfw_content_detected: Optional[List[bool]]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_cycle_diffusion import CycleDiffusionPipeline
from .pipeline_stable_diffusion import StableDiffusionPipeline
from .pipeline_stable_diffusion_attend_and_excite import StableDiffusionAttendAndExcitePipeline
    from .pipeline_stable_diffusion_img2img import StableDiffusionImg2ImgPipeline
from .pipeline_stable_diffusion_inpaint import StableDiffusionInpaintPipeline
from .pipeline_stable_diffusion_inpaint_legacy import StableDiffusionInpaintPipelineLegacy
    from .pipeline_stable_diffusion_instruct_pix2pix import StableDiffusionInstructPix2PixPipeline
from .pipeline_stable_diffusion_latent_upscale import StableDiffusionLatentUpscalePipeline
    from .pipeline_stable_diffusion_ldm3d import StableDiffusionLDM3DPipeline
from .pipeline_stable_diffusion_model_editing import StableDiffusionModelEditingPipeline
from .pipeline_stable_diffusion_panorama import StableDiffusionPanoramaPipeline
from .pipeline_stable_diffusion_paradigms import StableDiffusionParadigmsPipeline
from .pipeline_stable_diffusion_sag import StableDiffusionSAGPipeline
from .pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
from .pipeline_stable_unclip import StableUnCLIPPipeline
    from .pipeline_stable_unclip_img2img import StableUnCLIPImg2ImgPipeline
from .safety_checker import StableDiffusionSafetyChecker
from .stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(""">=""", """4.25.0""")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import StableDiffusionImageVariationPipeline
else:
from .pipeline_stable_diffusion_image_variation import StableDiffusionImageVariationPipeline
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(""">=""", """4.26.0""")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import (
        StableDiffusionDepth2ImgPipeline,
        StableDiffusionDiffEditPipeline,
        StableDiffusionPix2PixZeroPipeline,
    )
else:
    from .pipeline_stable_diffusion_depth2img import StableDiffusionDepth2ImgPipeline
    from .pipeline_stable_diffusion_diffedit import StableDiffusionDiffEditPipeline
    from .pipeline_stable_diffusion_pix2pix_zero import StableDiffusionPix2PixZeroPipeline
try:
if not (
is_torch_available()
and is_transformers_available()
and is_k_diffusion_available()
and is_k_diffusion_version(""">=""", """0.0.12""")
):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipeline_stable_diffusion_k_diffusion import StableDiffusionKDiffusionPipeline
try:
if not (is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_onnx_objects import * # noqa F403
else:
from .pipeline_onnx_stable_diffusion import OnnxStableDiffusionPipeline, StableDiffusionOnnxPipeline
    from .pipeline_onnx_stable_diffusion_img2img import OnnxStableDiffusionImg2ImgPipeline
from .pipeline_onnx_stable_diffusion_inpaint import OnnxStableDiffusionInpaintPipeline
from .pipeline_onnx_stable_diffusion_inpaint_legacy import OnnxStableDiffusionInpaintPipelineLegacy
from .pipeline_onnx_stable_diffusion_upscale import OnnxStableDiffusionUpscalePipeline
if is_transformers_available() and is_flax_available():
import flax
    @flax.struct.dataclass
    class FlaxStableDiffusionPipelineOutput(BaseOutput):
        images: np.ndarray
        nsfw_content_detected: List[bool]
from ...schedulers.scheduling_pndm_flax import PNDMSchedulerState
from .pipeline_flax_stable_diffusion import FlaxStableDiffusionPipeline
    from .pipeline_flax_stable_diffusion_img2img import FlaxStableDiffusionImg2ImgPipeline
from .pipeline_flax_stable_diffusion_inpaint import FlaxStableDiffusionInpaintPipeline
from .safety_checker_flax import FlaxStableDiffusionSafetyChecker
import itertools
import os
from collections import Counter, defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed
import numpy as np
import datasets
from .execute import check_correctness
A_ = "\\n@misc{chen2021evaluating,\n title={Evaluating Large Language Models Trained on Code},\n author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \\nand Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \\nand Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \\nand Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \\nand Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \\nand Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \\nand Mohammad Bavarian and Clemens Winter and Philippe Tillet \\nand Felipe Petroski Such and Dave Cummings and Matthias Plappert \\nand Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \\nand William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \\nand Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \\nand William Saunders and Christopher Hesse and Andrew N. Carr \\nand Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \\nand Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \\nand Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \\nand Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},\n year={2021},\n eprint={2107.03374},\n archivePrefix={arXiv},\n primaryClass={cs.LG}\n}\n"
A_ = "\\nThis metric implements the evaluation harness for the HumanEval problem solving dataset\ndescribed in the paper \"Evaluating Large Language Models Trained on Code\"\n(https://arxiv.org/abs/2107.03374).\n"
A_ = "\nCalculates how good are predictions given some references, using certain scores\nArgs:\n predictions: list of candidates to evaluate. Each candidates should be a list\n of strings with several code candidates to solve the problem.\n references: a list with a test for each prediction. Each test should evaluate the\n correctness of a code candidate.\n k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])\n num_workers: number of workers used to evaluate the canidate programs (Default: 4).\n timeout:\nReturns:\n pass_at_k: dict with pass rates for each k\n results: dict with granular results of each unittest\nExamples:\n >>> code_eval = datasets.load_metric(\"code_eval\")\n >>> test_cases = [\"assert add(2,3)==5\"]\n >>> candidates = [[\"def add(a,b): return a*b\", \"def add(a, b): return a+b\"]]\n >>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])\n >>> print(pass_at_k)\n {'pass@1': 0.5, 'pass@2': 1.0}\n"
A_ = "\n################################################################################\n !!!WARNING!!!\n################################################################################\nThe \"code_eval\" metric executes untrusted model-generated code in Python.\nAlthough it is highly unlikely that model-generated code will do something\novertly malicious in response to this test suite, model-generated code may act\ndestructively due to a lack of model capability or alignment.\nUsers are strongly encouraged to sandbox this evaluation suite so that it\ndoes not perform destructive actions on their host or network. For more\ninformation on how OpenAI sandboxes its code, see the paper \"Evaluating Large\nLanguage Models Trained on Code\" (https://arxiv.org/abs/2107.03374).\n\nOnce you have read this disclaimer and taken appropriate precautions,\nset the environment variable HF_ALLOW_CODE_EVAL=\"1\". Within Python you can to this\nwith:\n\n>>> import os\n>>> os.environ[\"HF_ALLOW_CODE_EVAL\"] = \"1\"\n\n################################################################################\\n"
A_ = "The MIT License\n\nCopyright (c) OpenAI (https://openai.com)\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE."
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class CodeEval(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            # This is the description that will appear on the metrics page.
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string")),
                    "references": datasets.Value("string"),
                }
            ),
            homepage="https://github.com/openai/human-eval",
            codebase_urls=["https://github.com/openai/human-eval"],
            reference_urls=["https://github.com/openai/human-eval"],
            license=_LICENSE,
        )
    def _compute(self, predictions, references, k=[1, 10, 100], num_workers=4, timeout=3.0):
        """Returns the pass@k scores of the predictions."""

        if os.getenv("HF_ALLOW_CODE_EVAL", 0) != "1":
            raise ValueError(_WARNING)

        if os.name == "nt":
            raise NotImplementedError("This metric is currently not supported on Windows.")

        with ThreadPoolExecutor(max_workers=num_workers) as executor:
            futures = []
            completion_id = Counter()
            n_samples = 0
            results = defaultdict(list)

            for task_id, (candidates, test_case) in enumerate(zip(predictions, references)):
                for candidate in candidates:
                    test_program = candidate + "\n" + test_case
                    args = (test_program, timeout, task_id, completion_id[task_id])
                    future = executor.submit(check_correctness, *args)
                    futures.append(future)
                    completion_id[task_id] += 1
                    n_samples += 1

            for future in as_completed(futures):
                result = future.result()
                results[result["task_id"]].append((result["completion_id"], result))

        total, correct = [], []
        for result in results.values():
            result.sort()
            passed = [r[1]["passed"] for r in result]
            total.append(len(passed))
            correct.append(sum(passed))
        total = np.array(total)
        correct = np.array(correct)

        ks = k
        pass_at_k = {f"pass@{k}": estimate_pass_at_k(total, correct, k).mean() for k in ks if (total >= k).all()}

        return pass_at_k, results
def estimate_pass_at_k(num_samples, num_correct, k):
    """Estimates pass@k of each problem and returns them in an array."""

    def estimator(n: int, c: int, k: int) -> float:
        """Calculates 1 - comb(n - c, k) / comb(n, k)."""
        if n - c < k:
            return 1.0
        return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1, n + 1))

    if isinstance(num_samples, int):
        num_samples_it = itertools.repeat(num_samples, len(num_correct))
    else:
        assert len(num_samples) == len(num_correct)
        num_samples_it = iter(num_samples)

    return np.array([estimator(int(n), int(c), k) for n, c in zip(num_samples_it, num_correct)])
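
# Usage sketch of the estimator alone (illustrative): with n samples per task
# and c of them passing, pass@1 is c / n in expectation.
if __name__ == "__main__":
    print(estimate_pass_at_k(np.array([5, 5]), np.array([2, 0]), 1))  # ~[0.4, 0.0]
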
def equation(x: float) -> float:
    return 10 - x * x


def bisection(a: float, b: float) -> float:
    # Bolzano's theorem: a sign change on [a, b] guarantees a root in between
    if equation(a) * equation(b) >= 0:
        raise ValueError("Wrong space!")

    c = a
    while (b - a) >= 0.01:
        # Find middle point
        c = (a + b) / 2
        # Check if middle point is root
        if equation(c) == 0.0:
            break
        # Decide the side to repeat the steps
        if equation(c) * equation(a) < 0:
            b = c
        else:
            a = c
    return c


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(bisection(-2, 5))
    print(bisection(0, 6))
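
# Convergence note (illustrative sketch): each pass halves the bracket, so
# shrinking an initial width w below tol takes about ceil(log2(w / tol))
# iterations, e.g. ceil(log2(7 / 0.01)) = 10 passes for bisection(-2, 5).
import math


def bisection_iterations(width: float, tol: float = 0.01) -> int:
    return math.ceil(math.log2(width / tol))
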
'''simple docstring'''
import argparse
import pathlib
import fairseq
import torch
from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
from fairseq.modules import TransformerSentenceEncoderLayer
from packaging import version
from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.models.roberta.modeling_roberta import RobertaAttention
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse("""1.0.0a"""):
raise Exception("""requires fairseq >= 1.0.0a""")
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = "Hello world! cécé herlolip"
def convert_xlm_roberta_xl_checkpoint_to_pytorch(
    roberta_checkpoint_path: str, pytorch_dump_folder_path: str, classification_head: bool
):
    roberta = FairseqRobertaModel.from_pretrained(roberta_checkpoint_path)
    roberta.eval()  # disable dropout
    roberta_sent_encoder = roberta.model.encoder.sentence_encoder
    config = XLMRobertaConfig(
        vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings,
        hidden_size=roberta.cfg.model.encoder_embed_dim,
        num_hidden_layers=roberta.cfg.model.encoder_layers,
        num_attention_heads=roberta.cfg.model.encoder_attention_heads,
        intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim,
        max_position_embeddings=514,
        type_vocab_size=1,
        layer_norm_eps=1e-5,
    )
    if classification_head:
        config.num_labels = roberta.model.classification_heads["mnli"].out_proj.weight.shape[0]

    print("Our RoBERTa config:", config)

    model = XLMRobertaXLForSequenceClassification(config) if classification_head else XLMRobertaXLForMaskedLM(config)
    model.eval()
    # Now let's copy all the weights.
    # Embeddings
    model.roberta.embeddings.word_embeddings.weight = roberta_sent_encoder.embed_tokens.weight
    model.roberta.embeddings.position_embeddings.weight = roberta_sent_encoder.embed_positions.weight
    model.roberta.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        model.roberta.embeddings.token_type_embeddings.weight
    )  # just zero them out b/c RoBERTa doesn't use them.

    model.roberta.encoder.LayerNorm.weight = roberta_sent_encoder.layer_norm.weight
    model.roberta.encoder.LayerNorm.bias = roberta_sent_encoder.layer_norm.bias

    for i in range(config.num_hidden_layers):
        # Encoder: start of layer
        layer: BertLayer = model.roberta.encoder.layer[i]
        roberta_layer: TransformerSentenceEncoderLayer = roberta_sent_encoder.layers[i]

        attention: RobertaAttention = layer.attention
        attention.self_attn_layer_norm.weight = roberta_layer.self_attn_layer_norm.weight
        attention.self_attn_layer_norm.bias = roberta_layer.self_attn_layer_norm.bias

        # self attention
        self_attn: BertSelfAttention = layer.attention.self
        assert (
            roberta_layer.self_attn.k_proj.weight.data.shape
            == roberta_layer.self_attn.q_proj.weight.data.shape
            == roberta_layer.self_attn.v_proj.weight.data.shape
            == torch.Size((config.hidden_size, config.hidden_size))
        )

        self_attn.query.weight.data = roberta_layer.self_attn.q_proj.weight
        self_attn.query.bias.data = roberta_layer.self_attn.q_proj.bias
        self_attn.key.weight.data = roberta_layer.self_attn.k_proj.weight
        self_attn.key.bias.data = roberta_layer.self_attn.k_proj.bias
        self_attn.value.weight.data = roberta_layer.self_attn.v_proj.weight
        self_attn.value.bias.data = roberta_layer.self_attn.v_proj.bias

        # self-attention output
        self_output: BertSelfOutput = layer.attention.output
        assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape
        self_output.dense.weight = roberta_layer.self_attn.out_proj.weight
        self_output.dense.bias = roberta_layer.self_attn.out_proj.bias

        # this one is final layer norm
        layer.LayerNorm.weight = roberta_layer.final_layer_norm.weight
        layer.LayerNorm.bias = roberta_layer.final_layer_norm.bias

        # intermediate
        intermediate: BertIntermediate = layer.intermediate
        assert intermediate.dense.weight.shape == roberta_layer.fc1.weight.shape
        intermediate.dense.weight = roberta_layer.fc1.weight
        intermediate.dense.bias = roberta_layer.fc1.bias

        # output
        bert_output: BertOutput = layer.output
        assert bert_output.dense.weight.shape == roberta_layer.fc2.weight.shape
        bert_output.dense.weight = roberta_layer.fc2.weight
        bert_output.dense.bias = roberta_layer.fc2.bias
        # end of layer

    if classification_head:
        model.classifier.dense.weight = roberta.model.classification_heads["mnli"].dense.weight
        model.classifier.dense.bias = roberta.model.classification_heads["mnli"].dense.bias
        model.classifier.out_proj.weight = roberta.model.classification_heads["mnli"].out_proj.weight
        model.classifier.out_proj.bias = roberta.model.classification_heads["mnli"].out_proj.bias
    else:
        # LM Head
        model.lm_head.dense.weight = roberta.model.encoder.lm_head.dense.weight
        model.lm_head.dense.bias = roberta.model.encoder.lm_head.dense.bias
        model.lm_head.layer_norm.weight = roberta.model.encoder.lm_head.layer_norm.weight
        model.lm_head.layer_norm.bias = roberta.model.encoder.lm_head.layer_norm.bias
        model.lm_head.decoder.weight = roberta.model.encoder.lm_head.weight
        model.lm_head.decoder.bias = roberta.model.encoder.lm_head.bias
    # Let's check that we get the same results.
    input_ids = roberta.encode(SAMPLE_TEXT).unsqueeze(0)  # batch of size 1

    our_output = model(input_ids)[0]
    if classification_head:
        their_output = roberta.model.classification_heads["mnli"](roberta.extract_features(input_ids))
    else:
        their_output = roberta.model(input_ids)[0]
    print(our_output.shape, their_output.shape)
    max_absolute_diff = torch.max(torch.abs(our_output - their_output)).item()
    print(f"max_absolute_diff = {max_absolute_diff}")  # ~ 1e-7
    success = torch.allclose(our_output, their_output, atol=1e-3)
    print("Do both models output the same tensors?", "🔥" if success else "💩")
    if not success:
        raise Exception("Something went wRoNg")

    pathlib.Path(pytorch_dump_folder_path).mkdir(parents=True, exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--roberta_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--classification_head""", action="""store_true""", help="""Whether to convert a final classification head."""
)
    args = parser.parse_args()
convert_xlm_roberta_xl_checkpoint_to_pytorch(
args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
"""simple docstring"""
from __future__ import annotations
def _lowerCAmelCase ( UpperCamelCase_ , UpperCamelCase_ ):
__SCREAMING_SNAKE_CASE = []
create_all_state(1 , UpperCamelCase_ , UpperCamelCase_ , [] , UpperCamelCase_ )
return result
def _lowerCAmelCase ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , ):
if level == 0:
total_list.append(current_list[:] )
return
for i in range(UpperCamelCase_ , total_number - level + 2 ):
current_list.append(UpperCamelCase_ )
create_all_state(i + 1 , UpperCamelCase_ , level - 1 , UpperCamelCase_ , UpperCamelCase_ )
current_list.pop()
def _lowerCAmelCase ( UpperCamelCase_ ):
for i in total_list:
print(*UpperCamelCase_ )
if __name__ == "__main__":
__magic_name__ = 4
__magic_name__ = 2
__magic_name__ = generate_all_combinations(n, k)
print_all_state(total_list)
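
# Cross-check sketch (illustrative): the backtracking above matches the
# standard library, which yields the same combinations in the same order.
if __name__ == "__main__":
    from itertools import combinations

    assert generate_all_combinations(4, 2) == [list(c) for c in combinations(range(1, 5), 2)]
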
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class DocumentQuestionAnsweringTool(PipelineTool):
    default_checkpoint = "naver-clova-ix/donut-base-finetuned-docvqa"
    description = (
        "This is a tool that answers a question about an document (pdf). It takes an input named `document` which "
        "should be the document containing the information, as well as a `question` that is the question about the "
        "document. It returns a text that contains the answer to the question."
    )
    name = "document_qa"
    pre_processor_class = AutoProcessor
    model_class = VisionEncoderDecoderModel

    inputs = ["image", "text"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        if not is_vision_available():
            raise ValueError("Pillow must be installed to use the DocumentQuestionAnsweringTool.")

        super().__init__(*args, **kwargs)

    def encode(self, document: "Image", question: str):
        task_prompt = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
        prompt = task_prompt.replace("{user_input}", question)
        decoder_input_ids = self.pre_processor.tokenizer(
            prompt, add_special_tokens=False, return_tensors="pt"
        ).input_ids
        pixel_values = self.pre_processor(document, return_tensors="pt").pixel_values

        return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}

    def forward(self, inputs):
        return self.model.generate(
            inputs["pixel_values"].to(self.device),
            decoder_input_ids=inputs["decoder_input_ids"].to(self.device),
            max_length=self.model.decoder.config.max_position_embeddings,
            early_stopping=True,
            pad_token_id=self.pre_processor.tokenizer.pad_token_id,
            eos_token_id=self.pre_processor.tokenizer.eos_token_id,
            use_cache=True,
            num_beams=1,
            bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]],
            return_dict_in_generate=True,
        ).sequences

    def decode(self, outputs):
        sequence = self.pre_processor.batch_decode(outputs)[0]
        sequence = sequence.replace(self.pre_processor.tokenizer.eos_token, "")
        sequence = sequence.replace(self.pre_processor.tokenizer.pad_token, "")
        sequence = re.sub(r"<.*?>", "", sequence, count=1).strip()  # remove first task start token
        sequence = self.pre_processor.token2json(sequence)

        return sequence["answer"]
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
NON_ALPHA = re.compile("[^A-Za-z_0-9]")
# parameters used in DuplicationIndex
MIN_NUM_TOKENS = 10
NUM_PERM = 256


def get_min_hash(tokens: List[str]) -> Optional[MinHash]:
    """Compute the MinHash of a code snippet."""
    if len(tokens) < MIN_NUM_TOKENS:
        return None
    min_hash = MinHash(num_perm=NUM_PERM)
    for token in set(tokens):
        min_hash.update(token.encode())
    return min_hash


def get_tokens(code: str) -> Set[str]:
    """Tokenize a code snippet."""
    return {t for t in NON_ALPHA.split(code) if len(t.strip()) > 0}


class DuplicationIndex:
    def __init__(
        self,
        *,
        duplication_jaccard_threshold: float = 0.85,
    ):
        self._duplication_jaccard_threshold = duplication_jaccard_threshold
        self._num_perm = NUM_PERM
        self._index = MinHashLSH(threshold=self._duplication_jaccard_threshold, num_perm=self._num_perm)

        self._duplicate_clusters = defaultdict(set)

    def add(self, code_key: Tuple, min_hash: MinHash) -> None:
        close_duplicates = self._index.query(min_hash)
        if code_key in self._index.keys:
            print(f"Duplicate key {code_key}")
            return

        self._index.insert(code_key, min_hash)
        if len(close_duplicates) > 0:
            for base_duplicate in close_duplicates:
                if base_duplicate in self._duplicate_clusters:
                    self._duplicate_clusters[base_duplicate].add(code_key)
                    break
            else:
                self._duplicate_clusters[close_duplicates[0]].add(code_key)

    def get_duplicate_clusters(self) -> List[List[Dict]]:
        duplicate_clusters = []
        for base, duplicates in self._duplicate_clusters.items():
            cluster = [base] + list(duplicates)
            # reformat the cluster to be a list of dict
            cluster = [{"base_index": el[0], "repo_name": el[1], "path": el[2]} for el in cluster]
            duplicate_clusters.append(cluster)
        return duplicate_clusters

    def save(self, filepath) -> None:
        duplicate_clusters = self.get_duplicate_clusters()
        with open(filepath, "w") as f:
            json.dump(duplicate_clusters, f)


def _compute_min_hash(element):
    index, data = element
    min_hash = get_min_hash([t for t in NON_ALPHA.split(data["content"]) if len(t.strip()) > 0])
    if min_hash is not None:
        return (index, data["repo_name"], data["path"]), min_hash


def minhash_iter(dataset_iterator: Type[Dataset]):
    with mp.Pool() as pool:
        for data in pool.imap_unordered(
            _compute_min_hash,
            ThreadedIterator(dataset_iterator, max_queue_size=10000),
            chunksize=100,
        ):
            if data is not None:
                yield data


def make_duplicate_clusters(dataset_iterator: Type[Dataset], jaccard_threshold: float):
    di = DuplicationIndex(duplication_jaccard_threshold=jaccard_threshold)

    for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(dataset_iterator)), max_queue_size=100)):
        di.add(filename, min_hash)

    # Returns a List[Cluster] where Cluster is List[str] with the filenames.
    return di.get_duplicate_clusters()


def jaccard_similarity(code1: str, code2: str) -> float:
    tokens1 = get_tokens(code1)
    tokens2 = get_tokens(code2)
    return len(tokens1 & tokens2) / len(tokens1 | tokens2)


_shared_dataset = None


def _find_cluster_extremes_shared(cluster, jaccard_threshold):
    extremes = []
    for element1 in cluster:
        code1 = _shared_dataset[element1["base_index"]]["content"]
        for element2 in extremes:
            code2 = _shared_dataset[element2["base_index"]]["content"]
            if jaccard_similarity(code1, code2) >= jaccard_threshold:
                element2["copies"] += 1
                break
        else:
            element1["copies"] = 1
            extremes.append(element1)
    return extremes


def find_extremes(cluster_list, dataset, jaccard_threshold):
    global _shared_dataset
    _shared_dataset = dataset
    extremes_list = []
    f = partial(_find_cluster_extremes_shared, jaccard_threshold=jaccard_threshold)
    with mp.Pool() as pool:
        for extremes in tqdm(
            pool.imap_unordered(
                f,
                cluster_list,
            ),
            total=len(cluster_list),
        ):
            extremes_list.append(extremes)
    return extremes_list


def deduplicate_dataset(
    dataset: Type[Dataset], jaccard_threshold: float = 0.85
) -> Tuple[Type[Dataset], List[List[Dict]]]:
    duplicate_clusters = make_duplicate_clusters(dataset, jaccard_threshold)
    duplicate_indices = {x["base_index"] for cluster in duplicate_clusters for x in cluster}
    extreme_dict = {}
    extremes_clusters = find_extremes(duplicate_clusters, dataset, jaccard_threshold)
    for extremes in extremes_clusters:
        for element in extremes:
            extreme_dict[element["base_index"]] = element
    remove_indices = duplicate_indices - set(extreme_dict.keys())
    ds_filter = dataset.filter(lambda x, idx: idx not in remove_indices, with_indices=True)

    # update duplicate_clusters
    for cluster in duplicate_clusters:
        for element in cluster:
            element["is_extreme"] = element["base_index"] in extreme_dict
            if element["is_extreme"]:
                element["copies"] = extreme_dict[element["base_index"]]["copies"]

    print(f"Original dataset size: {len(dataset)}")
    print(f"Number of duplicate clusters: {len(duplicate_clusters)}")
    print(f"Files in duplicate cluster: {len(duplicate_indices)}")
    print(f"Unique files in duplicate cluster: {len(extreme_dict)}")
    print(f"Filtered dataset size: {len(ds_filter)}")

    return ds_filter, duplicate_clusters
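
# MinHash intuition sketch (illustrative): the LSH index above matches snippets
# whose MinHash signatures agree often, and that agreement estimates the exact
# token-set Jaccard similarity computed by jaccard_similarity().
if __name__ == "__main__":
    code_a = "def add_numbers(first_value, second_value):\n    total_value = first_value + second_value\n    return total_value  # simple numeric addition helper"
    code_b = "def add_numbers(first_value, second_value):\n    total_value = first_value + second_value\n    return total_value  # simple numeric summing helper"
    print(jaccard_similarity(code_a, code_b))  # exact: 9/11 ~ 0.82
    min_hash_a = get_min_hash(list(get_tokens(code_a)))
    min_hash_b = get_min_hash(list(get_tokens(code_b)))
    if min_hash_a is not None and min_hash_b is not None:  # None below MIN_NUM_TOKENS
        print(min_hash_a.jaccard(min_hash_b))  # MinHash estimate of the same value
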
import inspect
import unittest
from transformers import ConvNextConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel
from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class ConvNextModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        num_channels=3,
        num_stages=4,
        hidden_sizes=[10, 20, 30, 40],
        depths=[2, 2, 3, 2],
        is_training=True,
        use_labels=True,
        intermediate_size=37,
        hidden_act="gelu",
        num_labels=10,
        initializer_range=0.02,
        out_features=["stage2", "stage3", "stage4"],
        out_indices=[2, 3, 4],
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.out_indices = out_indices
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ConvNextConfig(
            num_channels=self.num_channels,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            num_stages=self.num_stages,
            hidden_act=self.hidden_act,
            is_decoder=False,
            initializer_range=self.initializer_range,
            out_features=self.out_features,
            out_indices=self.out_indices,
            num_labels=self.num_labels,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ConvNextModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        model = ConvNextForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = ConvNextBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify hidden states
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])

        # verify backbone works with out_features=None
        config.out_features = None
        model = ConvNextBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class UpperCamelCase__ (lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase_ : Union[str, Any] = (
(
ConvNextModel,
ConvNextForImageClassification,
ConvNextBackbone,
)
if is_torch_available()
else ()
)
lowerCamelCase_ : Any = (
{"""feature-extraction""": ConvNextModel, """image-classification""": ConvNextForImageClassification}
if is_torch_available()
else {}
)
lowerCamelCase_ : Tuple = True
lowerCamelCase_ : List[str] = False
lowerCamelCase_ : Union[str, Any] = False
lowerCamelCase_ : Optional[int] = False
lowerCamelCase_ : Tuple = False
def _lowercase ( self ) -> List[str]:
lowerCamelCase : int = ConvNextModelTester(self )
lowerCamelCase : Union[str, Any] = ConfigTester(self , config_class=UpperCamelCase__ , has_text_modality=UpperCamelCase__ , hidden_size=37 )
def _lowercase ( self ) -> Optional[Any]:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _lowercase ( self ) -> Any:
return
@unittest.skip(reason="ConvNext does not use inputs_embeds" )
def _lowercase ( self ) -> List[str]:
pass
@unittest.skip(reason="ConvNext does not support input and output embeddings" )
def _lowercase ( self ) -> Optional[Any]:
pass
@unittest.skip(reason="ConvNext does not use feedforward chunking" )
def _lowercase ( self ) -> Any:
pass
def _lowercase ( self ) -> List[str]:
lowerCamelCase , lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase : Optional[int] = model_class(UpperCamelCase__ )
lowerCamelCase : Any = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase : Dict = [*signature.parameters.keys()]
lowerCamelCase : int = ["pixel_values"]
self.assertListEqual(arg_names[:1] , UpperCamelCase__ )
def _lowercase ( self ) -> Union[str, Any]:
lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase__ )
def _lowercase ( self ) -> List[Any]:
lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*UpperCamelCase__ )
def _lowercase ( self ) -> Any:
def check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
lowerCamelCase : str = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
with torch.no_grad():
lowerCamelCase : Optional[Any] = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
lowerCamelCase : Union[str, Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
lowerCamelCase : Tuple = self.model_tester.num_stages
self.assertEqual(len(UpperCamelCase__ ) , expected_num_stages + 1 )
# ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
lowerCamelCase , lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase : Any = True
check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCamelCase : Dict = True
check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
def _lowercase ( self ) -> Dict:
lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCamelCase__ )
@slow
def _lowercase ( self ) -> Dict:
for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase : Optional[Any] = ConvNextModel.from_pretrained(UpperCamelCase__ )
self.assertIsNotNone(UpperCamelCase__ )
def A ( ) -> List[Any]:
lowerCamelCase : Union[str, Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class UpperCamelCase__ (unittest.TestCase ):
'''simple docstring'''
@cached_property
def _lowercase ( self ) -> List[str]:
return AutoImageProcessor.from_pretrained("facebook/convnext-tiny-224" ) if is_vision_available() else None
@slow
def _lowercase ( self ) -> Optional[Any]:
lowerCamelCase : int = ConvNextForImageClassification.from_pretrained("facebook/convnext-tiny-224" ).to(UpperCamelCase__ )
lowerCamelCase : Optional[Any] = self.default_image_processor
lowerCamelCase : Optional[Any] = prepare_img()
lowerCamelCase : Union[str, Any] = image_processor(images=UpperCamelCase__ , return_tensors="pt" ).to(UpperCamelCase__ )
# forward pass
with torch.no_grad():
lowerCamelCase : List[Any] = model(**UpperCamelCase__ )
# verify the logits
lowerCamelCase : Optional[int] = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , UpperCamelCase__ )
lowerCamelCase : Union[str, Any] = torch.tensor([-0.0260, -0.4739, 0.1911] ).to(UpperCamelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCamelCase__ , atol=1e-4 ) )
@require_torch
class UpperCamelCase__ (unittest.TestCase , lowerCAmelCase__ ):
'''simple docstring'''
lowerCamelCase_ : Union[str, Any] = (ConvNextBackbone,) if is_torch_available() else ()
lowerCamelCase_ : Optional[int] = ConvNextConfig
lowerCamelCase_ : Any = False
def _lowercase ( self ) -> Optional[int]:
lowerCamelCase : Optional[Any] = ConvNextModelTester(self )
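
# A short standalone sketch (not part of the test suite) of how the backbone exercised
# above is typically driven. It uses only ConvNextConfig defaults plus an out_features
# override, so no pretrained weights are downloaded; shapes are illustrative.
if __name__ == "__main__":
    import torch
    from transformers import ConvNextBackbone, ConvNextConfig

    config = ConvNextConfig(out_features=["stage2", "stage3", "stage4"])
    backbone = ConvNextBackbone(config).eval()
    with torch.no_grad():
        outputs = backbone(torch.randn(1, 3, 224, 224))
    # one (batch, channels, height, width) feature map per requested stage
    print([tuple(fm.shape) for fm in outputs.feature_maps])
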
from __future__ import annotations
import unittest
from transformers import BlenderbotSmallConfig, BlenderbotSmallTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel
@require_tf
class TFBlenderbotSmallModelTester:
    config_cls = BlenderbotSmallConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_blenderbot_small_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFBlenderbotSmallModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)


def prepare_blenderbot_small_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }


@require_tf
class TFBlenderbotSmallModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel) if is_tf_available() else ()
    )
    all_generative_model_classes = (TFBlenderbotSmallForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFBlenderbotSmallForConditionalGeneration,
            "feature-extraction": TFBlenderbotSmallModel,
            "summarization": TFBlenderbotSmallForConditionalGeneration,
            "text2text-generation": TFBlenderbotSmallForConditionalGeneration,
            "translation": TFBlenderbotSmallForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFBlenderbotSmallModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlenderbotSmallConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)


@require_tokenizers
@require_tf
class TFBlenderbot90MIntegrationTests(unittest.TestCase):
    src_text = [
        "Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. I end up sweating and blushing and feel like "
        " i'm going to throw up.\nand why is that?"
    ]
    model_name = "facebook/blenderbot_small-90M"

    @cached_property
    def tokenizer(self):
        # use "old" tokenizer here because of bug when downloading new tokenizer
        return BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    @slow
    def test_90_generation_from_long_input(self):
        model_inputs = self.tokenizer(self.src_text, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids,
            attention_mask=model_inputs.attention_mask,
            num_beams=2,
            use_cache=True,
        )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)[0]
        assert generated_words in (
            "i don't know. i just feel like i'm going to throw up. it's not fun.",
            "i'm not sure. i just feel like i've been feeling like i have to be in a certain place",
            "i'm not sure. i just feel like i've been in a bad situation.",
        )
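
# A minimal generation sketch under the same assumptions as the integration test above
# (Hub access; the "old" blenderbot-90M tokenizer workaround still applies).
if __name__ == "__main__":
    from transformers import BlenderbotSmallTokenizer, TFAutoModelForSeq2SeqLM

    tokenizer = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
    model = TFAutoModelForSeq2SeqLM.from_pretrained("facebook/blenderbot_small-90M")
    inputs = tokenizer(["Do you have anxiety?"], return_tensors="tf")
    reply_ids = model.generate(inputs.input_ids, attention_mask=inputs.attention_mask, num_beams=2)
    print(tokenizer.batch_decode(reply_ids.numpy(), skip_special_tokens=True)[0])
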
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
logger = logging.get_logger(__name__)

DEBERTA_V2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/deberta-v2-xlarge": "https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json",
    "microsoft/deberta-v2-xxlarge": "https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json",
    "microsoft/deberta-v2-xlarge-mnli": (
        "https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json"
    ),
    "microsoft/deberta-v2-xxlarge-mnli": (
        "https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json"
    ),
}


class DebertaV2Config(PretrainedConfig):
    model_type = "deberta-v2"

    def __init__(
        self,
        vocab_size=128100,
        hidden_size=1536,
        num_hidden_layers=24,
        num_attention_heads=24,
        intermediate_size=6144,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=0,
        initializer_range=0.02,
        layer_norm_eps=1e-7,
        relative_attention=False,
        max_relative_positions=-1,
        pad_token_id=0,
        position_biased_input=True,
        pos_att_type=None,
        pooler_dropout=0,
        pooler_hidden_act="gelu",
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.relative_attention = relative_attention
        self.max_relative_positions = max_relative_positions
        self.pad_token_id = pad_token_id
        self.position_biased_input = position_biased_input

        # Backwards compatibility
        if isinstance(pos_att_type, str):
            pos_att_type = [x.strip() for x in pos_att_type.lower().split("|")]

        self.pos_att_type = pos_att_type
        self.vocab_size = vocab_size
        self.layer_norm_eps = layer_norm_eps

        self.pooler_hidden_size = kwargs.get("pooler_hidden_size", hidden_size)
        self.pooler_dropout = pooler_dropout
        self.pooler_hidden_act = pooler_hidden_act


class DebertaV2OnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        if self._config.type_vocab_size > 0:
            return OrderedDict(
                [("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ("token_type_ids", dynamic_axis)]
            )
        else:
            return OrderedDict([("input_ids", dynamic_axis), ("attention_mask", dynamic_axis)])

    @property
    def default_onnx_opset(self) -> int:
        return 12

    def generate_dummy_inputs(
        self,
        preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],
        batch_size: int = -1,
        seq_length: int = -1,
        num_choices: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
        tokenizer: "PreTrainedTokenizerBase" = None,
    ) -> Mapping[str, Any]:
        dummy_inputs = super().generate_dummy_inputs(preprocessor=preprocessor, framework=framework)
        if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
            del dummy_inputs["token_type_ids"]
        return dummy_inputs
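
# A small sketch of typical usage: instantiating the config is purely local and the
# hyperparameter overrides below are illustrative only.
if __name__ == "__main__":
    config = DebertaV2Config(num_hidden_layers=2, hidden_size=128, num_attention_heads=4)
    print(config.model_type, config.hidden_size, config.max_position_embeddings)
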
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bert import BertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt',
'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt',
'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/vocab.txt',
'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/vocab.txt',
'bert-base-multilingual-uncased': (
'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt'
),
'bert-base-multilingual-cased': 'https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt',
'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt',
'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt',
'bert-large-uncased-whole-word-masking': (
'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt'
),
'bert-large-cased-whole-word-masking': (
'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt'
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'
),
'bert-base-cased-finetuned-mrpc': (
'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt'
),
'bert-base-german-dbmdz-cased': 'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt',
'bert-base-german-dbmdz-uncased': (
'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt'
),
'TurkuNLP/bert-base-finnish-cased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt'
),
'TurkuNLP/bert-base-finnish-uncased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt'
),
'wietsedv/bert-base-dutch-cased': (
'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json',
'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json',
'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json',
'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json',
'bert-base-multilingual-uncased': (
'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json'
),
'bert-base-multilingual-cased': (
'https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json'
),
'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json',
'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json',
'bert-large-uncased-whole-word-masking': (
'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json'
),
'bert-large-cased-whole-word-masking': (
'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json'
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'
),
'bert-base-cased-finetuned-mrpc': (
'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json'
),
'bert-base-german-dbmdz-cased': (
'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json'
),
'bert-base-german-dbmdz-uncased': (
'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json'
),
'TurkuNLP/bert-base-finnish-cased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json'
),
'TurkuNLP/bert-base-finnish-uncased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json'
),
'wietsedv/bert-base-dutch-cased': (
'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'bert-base-uncased': 512,
    'bert-large-uncased': 512,
    'bert-base-cased': 512,
    'bert-large-cased': 512,
    'bert-base-multilingual-uncased': 512,
    'bert-base-multilingual-cased': 512,
    'bert-base-chinese': 512,
    'bert-base-german-cased': 512,
    'bert-large-uncased-whole-word-masking': 512,
    'bert-large-cased-whole-word-masking': 512,
    'bert-large-uncased-whole-word-masking-finetuned-squad': 512,
    'bert-large-cased-whole-word-masking-finetuned-squad': 512,
    'bert-base-cased-finetuned-mrpc': 512,
    'bert-base-german-dbmdz-cased': 512,
    'bert-base-german-dbmdz-uncased': 512,
    'TurkuNLP/bert-base-finnish-cased-v1': 512,
    'TurkuNLP/bert-base-finnish-uncased-v1': 512,
    'wietsedv/bert-base-dutch-cased': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'bert-base-uncased': {'do_lower_case': True},
'bert-large-uncased': {'do_lower_case': True},
'bert-base-cased': {'do_lower_case': False},
'bert-large-cased': {'do_lower_case': False},
'bert-base-multilingual-uncased': {'do_lower_case': True},
'bert-base-multilingual-cased': {'do_lower_case': False},
'bert-base-chinese': {'do_lower_case': False},
'bert-base-german-cased': {'do_lower_case': False},
'bert-large-uncased-whole-word-masking': {'do_lower_case': True},
'bert-large-cased-whole-word-masking': {'do_lower_case': False},
'bert-large-uncased-whole-word-masking-finetuned-squad': {'do_lower_case': True},
'bert-large-cased-whole-word-masking-finetuned-squad': {'do_lower_case': False},
'bert-base-cased-finetuned-mrpc': {'do_lower_case': False},
'bert-base-german-dbmdz-cased': {'do_lower_case': False},
'bert-base-german-dbmdz-uncased': {'do_lower_case': True},
'TurkuNLP/bert-base-finnish-cased-v1': {'do_lower_case': False},
'TurkuNLP/bert-base-finnish-uncased-v1': {'do_lower_case': True},
'wietsedv/bert-base-dutch-cased': {'do_lower_case': False},
}
class BertTokenizerFast(PreTrainedTokenizerFast):
    r"""Construct a "fast" BERT tokenizer, backed by HuggingFace's *tokenizers* library."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1 is not None:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
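
# A quick usage sketch, assuming Hub access for the pretrained vocab/tokenizer files.
if __name__ == "__main__":
    tokenizer = BertTokenizerFast.from_pretrained("bert-base-uncased")
    encoding = tokenizer("Hello world!", "Second segment", return_token_type_ids=True)
    print(encoding["input_ids"])
    print(encoding["token_type_ids"])  # 0s for the first segment, 1s for the second
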