code stringlengths 86 54.5k | code_codestyle int64 0 371 | style_context stringlengths 87 49.2k | style_context_codestyle int64 0 349 | label int64 0 1 |
|---|---|---|---|---|
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
    KandinskyV22ControlnetImg2ImgPipeline,
    KandinskyV22PriorEmb2EmbPipeline,
    UNet2DConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyV22ControlnetImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22ControlnetImg2ImgPipeline
    params = ["image_embeds", "negative_image_embeds", "image", "hint"]
    batch_params = ["image_embeds", "negative_image_embeds", "image", "hint"]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "strength",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32
    @property
    def time_input_dim(self):
        return 32
    @property
    def block_out_channels_0(self):
        return self.time_input_dim
    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4
    @property
    def cross_attention_dim(self):
        return 100
    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 8,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image_hint",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model
    @property
    def dummy_movq_kwargs(self):
return {
"block_out_channels": [32, 32, 64, 64],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq

        ddim_config = {
            "num_train_timesteps": 1000,
            "beta_schedule": "linear",
            "beta_start": 0.00085,
            "beta_end": 0.012,
            "clip_sample": False,
            "set_alpha_to_one": False,
            "steps_offset": 0,
            "prediction_type": "epsilon",
            "thresholding": False,
        }

        scheduler = DDIMScheduler(**ddim_config)

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }

        return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
        # create hint
        hint = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": init_image,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "hint": hint,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 10,
            "guidance_scale": 7.0,
            "strength": 0.2,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky_controlnet_img2img(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(**self.get_dummy_inputs(device), return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.54985034, 0.55509365, 0.52561504, 0.5570494, 0.5593818, 0.5263979, 0.50285643, 0.5069846, 0.51196736]
        )

        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"

        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class KandinskyV22ControlnetImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_controlnet_img2img(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy"
        )

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        init_image = init_image.resize((512, 512))

        hint = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/hint_image_cat.png"
        )
        hint = torch.from_numpy(np.array(hint)).float() / 255.0
        hint = hint.permute(2, 0, 1).unsqueeze(0)

        prompt = "A robot, 4k photo"

        pipe_prior = KandinskyV22PriorEmb2EmbPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyV22ControlnetImg2ImgPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)

        image_emb, zero_image_emb = pipe_prior(
            prompt, image=init_image, strength=0.85, generator=generator, negative_prompt=""
        ).to_tuple()

        output = pipeline(
            image=init_image,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            hint=hint,
            generator=generator,
            num_inference_steps=100,
            height=512,
            width=512,
            strength=0.5,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (512, 512, 3)

        assert_mean_pixel_difference(image, expected_image)
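# --- Added note: a minimal usage sketch of the two pipelines exercised above.
# Model IDs, argument names, and preprocessing all mirror the integration test;
# treat this as an illustrative sketch rather than canonical documentation.
#
#   import numpy as np
#   import torch
#   from diffusers import KandinskyV22ControlnetImg2ImgPipeline, KandinskyV22PriorEmb2EmbPipeline
#   from diffusers.utils import load_image
#
#   pipe_prior = KandinskyV22PriorEmb2EmbPipeline.from_pretrained(
#       "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
#   ).to("cuda")
#   pipe = KandinskyV22ControlnetImg2ImgPipeline.from_pretrained(
#       "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16
#   ).to("cuda")
#
#   init_image = load_image("https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/kandinsky/cat.png").resize((512, 512))
#   hint = load_image("https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/kandinskyv22/hint_image_cat.png")
#   hint = torch.from_numpy(np.array(hint)).float() / 255.0
#   hint = hint.permute(2, 0, 1).unsqueeze(0)  # depth hint as NCHW tensor, as in the test
#
#   image_emb, zero_image_emb = pipe_prior(
#       "A robot, 4k photo", image=init_image, strength=0.85, negative_prompt=""
#   ).to_tuple()
#   image = pipe(
#       image=init_image, image_embeds=image_emb, negative_image_embeds=zero_image_emb,
#       hint=hint, num_inference_steps=50, strength=0.5, output_type="pil",
#   ).images[0]
#   image.save("robot_cat.png")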
| 347 |
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def is_sq(number: int) -> bool:
    sq = int(number**0.5)
    return number == sq * sq
def add_three(
    x_num: int, x_den: int, y_num: int, y_den: int, z_num: int, z_den: int
) -> tuple[int, int]:
    top = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
    bottom = x_den * y_den * z_den
    hcf = gcd(top, bottom)
    top //= hcf
    bottom //= hcf
    return top, bottom
def solution(order: int = 35) -> int:
    unique_s: set = set()
    hcf: int
    total: Fraction = Fraction(0)
    fraction_sum: tuple[int, int]

    for x_num in range(1, order + 1):
        for x_den in range(x_num + 1, order + 1):
            for y_num in range(1, order + 1):
                for y_den in range(y_num + 1, order + 1):
                    # n=1
                    z_num = x_num * y_den + x_den * y_num
                    z_den = x_den * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num, x_den, y_num, y_den, z_num, z_den
                        )
                        unique_s.add(fraction_sum)

                    # n=2
                    z_num = (
                        x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
                    )
                    z_den = x_den * x_den * y_den * y_den
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num, x_den, y_num, y_den, z_num, z_den
                            )
                            unique_s.add(fraction_sum)

                    # n=-1
                    z_num = x_num * y_num
                    z_den = x_den * y_num + x_num * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num, x_den, y_num, y_den, z_num, z_den
                        )
                        unique_s.add(fraction_sum)

                    # n=-2
                    z_num = x_num * x_num * y_num * y_num
                    z_den = (
                        x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
                    )
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num, x_den, y_num, y_den, z_num, z_den
                            )
                            unique_s.add(fraction_sum)

    for num, den in unique_s:
        total += Fraction(num, den)

    return total.denominator + total.numerator


if __name__ == "__main__":
    print(f"{solution() = }")
| 299 | 0 |
def solution(n: int = 1000) -> int:
    return sum(2 * a * ((a - 1) // 2) for a in range(3, n + 1))


if __name__ == "__main__":
    print(solution())
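# --- Added note: the closed form above matches Project Euler 120 ("Square
# remainders"), where the maximum of ((a - 1)**n + (a + 1)**n) % a**2 over n
# is 2*a*((a - 1) // 2). The attribution is an inference from the code, but
# the identity itself can be brute-force checked for small a:
def r_max_brute_force(a: int) -> int:
    # remainders cycle with period dividing 2*a, so n in [1, 2*a] suffices
    return max(((a - 1) ** n + (a + 1) ** n) % (a**2) for n in range(1, 2 * a + 1))


assert all(r_max_brute_force(a) == 2 * a * ((a - 1) // 2) for a in range(3, 50))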
| 221 |
from math import pi, sqrt
def gamma(num: float) -> float:
    """
    Calculates the value of the gamma function for integer or half-integer input.

    >>> gamma(4)
    6.0
    >>> gamma(0.5) == sqrt(pi)
    True
    """
    if num <= 0:
        raise ValueError("math domain error")
    if num > 171.5:
        raise OverflowError("math range error")
    elif num - int(num) not in (0, 0.5):
        raise NotImplementedError("num must be an integer or a half-integer")
    elif num == 0.5:
        return sqrt(pi)
    else:
        return 1.0 if num == 1 else (num - 1) * gamma(num - 1)


def test_gamma() -> None:
    """
    >>> test_gamma()
    """
    assert gamma(0.5) == sqrt(pi)
    assert gamma(1) == 1.0
    assert gamma(2) == 1.0


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    num = 1.0
    while num:
        num = float(input("Gamma of: "))
        print(f"gamma({num}) = {gamma(num)}")
        print("\nEnter 0 to exit...")
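# --- Added numerical cross-check of the recursion against the standard
# library (a sanity sketch, not part of the original file):
import math

for x in (0.5, 1, 1.5, 2, 3.5, 10):
    assert math.isclose(gamma(x), math.gamma(x), rel_tol=1e-9), x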
| 221 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_timesformer": ["TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "TimesformerConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_timesformer"] = [
        "TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TimesformerModel",
        "TimesformerForVideoClassification",
        "TimesformerPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timesformer import (
TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimesformerForVideoClassification,
TimesformerModel,
TimesformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
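# --- Added note: a minimal standalone sketch of the lazy-import pattern the
# module above relies on (names here are illustrative, not the transformers
# internals): the expensive submodule import only happens on first attribute
# access.
import importlib
import types


class MiniLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each public symbol to the submodule that defines it
        self._symbol_to_module = {
            symbol: module for module, symbols in import_structure.items() for symbol in symbols
        }

    def __getattr__(self, name):
        if name not in self._symbol_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {name!r}")
        submodule = importlib.import_module("." + self._symbol_to_module[name], self.__name__)
        return getattr(submodule, name)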
| 49 |
import sys
from typing import Tuple
import numpy as np
import torch
from PIL import Image
from torch import nn
from transformers.image_utils import PILImageResampling
from utils import img_tensorize
class ResizeShortestEdge:
    def __init__(self, short_edge_length, max_size=sys.maxsize):
        """
        Args:
            short_edge_length (list[min, max])
            max_size (int): maximum allowed longest edge length.
        """
        self.interp_method = "bilinear"
        self.max_size = max_size
        self.short_edge_length = short_edge_length

    def __call__(self, imgs):
        img_augs = []
        for img in imgs:
            h, w = img.shape[:2]
            # later: provide list and randomly choose index for resize
            size = np.random.randint(self.short_edge_length[0], self.short_edge_length[1] + 1)
            if size == 0:
                return img
            scale = size * 1.0 / min(h, w)
            if h < w:
                newh, neww = size, scale * w
            else:
                newh, neww = scale * h, size
            if max(newh, neww) > self.max_size:
                scale = self.max_size * 1.0 / max(newh, neww)
                newh = newh * scale
                neww = neww * scale
            neww = int(neww + 0.5)
            newh = int(newh + 0.5)

            if img.dtype == np.uint8:
                pil_image = Image.fromarray(img)
                pil_image = pil_image.resize((neww, newh), PILImageResampling.BILINEAR)
                img = np.asarray(pil_image)
            else:
                img = img.permute(2, 0, 1).unsqueeze(0)  # hw(c) -> nchw
                img = nn.functional.interpolate(
                    img, (newh, neww), mode=self.interp_method, align_corners=False
                ).squeeze(0)
            img_augs.append(img)

        return img_augs
class Preprocess:
    def __init__(self, cfg):
        self.aug = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST], cfg.INPUT.MAX_SIZE_TEST)
        self.input_format = cfg.INPUT.FORMAT
        self.size_divisibility = cfg.SIZE_DIVISIBILITY
        self.pad_value = cfg.PAD_VALUE
        self.max_image_size = cfg.INPUT.MAX_SIZE_TEST
        self.device = cfg.MODEL.DEVICE
        self.pixel_std = torch.tensor(cfg.MODEL.PIXEL_STD).to(self.device).view(len(cfg.MODEL.PIXEL_STD), 1, 1)
        self.pixel_mean = torch.tensor(cfg.MODEL.PIXEL_MEAN).to(self.device).view(len(cfg.MODEL.PIXEL_STD), 1, 1)
        self.normalizer = lambda x: (x - self.pixel_mean) / self.pixel_std

    def pad(self, images):
        max_size = tuple(max(s) for s in zip(*[img.shape for img in images]))
        image_sizes = [im.shape[-2:] for im in images]
        images = [
            nn.functional.pad(
                im,
                [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]],
                value=self.pad_value,
            )
            for size, im in zip(image_sizes, images)
        ]

        return torch.stack(images), torch.tensor(image_sizes)

    def __call__(self, images, single_image=False):
        with torch.no_grad():
            if not isinstance(images, list):
                images = [images]
            if single_image:
                assert len(images) == 1
            for i in range(len(images)):
                if isinstance(images[i], torch.Tensor):
                    images.insert(i, images.pop(i).to(self.device).float())
                elif not isinstance(images[i], torch.Tensor):
                    images.insert(
                        i,
                        torch.as_tensor(img_tensorize(images.pop(i), input_format=self.input_format))
                        .to(self.device)
                        .float(),
                    )

            # resize smallest edge
            raw_sizes = torch.tensor([im.shape[:2] for im in images])
            images = self.aug(images)
            # transpose images and convert to torch tensors
            # images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images]
            # now normalize before pad to avoid useless arithmetic
            images = [self.normalizer(x) for x in images]
            # now pad them to do the following operations
            images, sizes = self.pad(images)
            # Normalize
            if self.size_divisibility > 0:
                raise NotImplementedError()
            # pad
            scales_yx = torch.true_divide(raw_sizes, sizes)
            if single_image:
                return images[0], sizes[0], scales_yx[0]
            else:
                return images, sizes, scales_yx
def _scale_box(boxes, scale_yx):
    boxes[:, 0::2] *= scale_yx[:, 1]
    boxes[:, 1::2] *= scale_yx[:, 0]
    return boxes


def _clip_box(tensor, box_size: Tuple[int, int]):
    assert torch.isfinite(tensor).all(), "Box tensor contains infinite or NaN!"
    h, w = box_size
    tensor[:, 0].clamp_(min=0, max=w)
    tensor[:, 1].clamp_(min=0, max=h)
    tensor[:, 2].clamp_(min=0, max=w)
    tensor[:, 3].clamp_(min=0, max=h)
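# --- Added note: a rough usage sketch for the classes above. The cfg
# attributes mirror exactly the reads in Preprocess.__init__; the concrete
# values are stand-ins for illustration, and "cat.jpg" is a hypothetical path.
from types import SimpleNamespace

cfg = SimpleNamespace(
    INPUT=SimpleNamespace(MIN_SIZE_TEST=800, MAX_SIZE_TEST=1333, FORMAT="RGB"),
    SIZE_DIVISIBILITY=0,
    PAD_VALUE=0,
    MODEL=SimpleNamespace(DEVICE="cpu", PIXEL_STD=[1.0, 1.0, 1.0], PIXEL_MEAN=[102.98, 115.95, 122.77]),
)
preprocess = Preprocess(cfg)
# returns the padded batch, per-image sizes, and y/x rescale factors that
# _scale_box uses to map predicted boxes back to the original resolution:
# images, sizes, scales_yx = preprocess(["cat.jpg"], single_image=False)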
| 186 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
    "configuration_mega": ["MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MegaConfig", "MegaOnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mega"] = [
        "MEGA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MegaForCausalLM",
        "MegaForMaskedLM",
        "MegaForMultipleChoice",
        "MegaForQuestionAnswering",
        "MegaForSequenceClassification",
        "MegaForTokenClassification",
        "MegaModel",
        "MegaPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mega import (
MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
MegaForCausalLM,
MegaForMaskedLM,
MegaForMultipleChoice,
MegaForQuestionAnswering,
MegaForSequenceClassification,
MegaForTokenClassification,
MegaModel,
MegaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 133 |
"""simple docstring"""
from typing import Any
class Node:
    def __init__(self, data: Any) -> None:
        self.data = data
        self.next = None


class LinkedList:
    def __init__(self) -> None:
        self.head = None

    def print_list(self) -> None:
        temp = self.head
        while temp is not None:
            print(temp.data, end=" ")
            temp = temp.next
        print()

    # adding nodes
    def push(self, new_data: Any) -> None:
        new_node = Node(new_data)
        new_node.next = self.head
        self.head = new_node

    # swapping nodes
    def swap_nodes(self, node_data_1: Any, node_data_2: Any) -> None:
        if node_data_1 == node_data_2:
            return
        else:
            node_1 = self.head
            while node_1 is not None and node_1.data != node_data_1:
                node_1 = node_1.next

            node_2 = self.head
            while node_2 is not None and node_2.data != node_data_2:
                node_2 = node_2.next

            if node_1 is None or node_2 is None:
                return

            node_1.data, node_2.data = node_2.data, node_1.data


if __name__ == "__main__":
    ll = LinkedList()
    for i in range(5, 0, -1):
        ll.push(i)

    ll.print_list()

    ll.swap_nodes(1, 4)
    print("After swapping")
    ll.print_list()
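# --- Added note: expected output of the demo above. swap_nodes exchanges the
# nodes' payloads (an O(n) search plus an O(1) data swap) rather than
# relinking pointers:
#   1 2 3 4 5
#   After swapping
#   4 2 3 1 5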
| 133 | 1 |
"""simple docstring"""
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBart50Tokenizer, MBart50TokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")

if is_torch_available():
    from transformers.models.mbart.modeling_mbart import shift_tokens_right

EN_CODE = 250004
RO_CODE = 250020
@require_sentencepiece
@require_tokenizers
class MBart50TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MBart50Tokenizer
    rust_tokenizer_class = MBart50TokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = MBart50Tokenizer(SAMPLE_VOCAB, src_lang="en_XX", tgt_lang="ro_RO", keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<s>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 1054)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1054)
    def test_full_tokenizer(self):
        tokenizer = MBart50Tokenizer(SAMPLE_VOCAB, src_lang="en_XX", tgt_lang="ro_RO", keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", "."],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
            ],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", "."],
        )
    @slow
    def test_tokenizer_integration(self):
_lowerCamelCase : Any = {'''input_ids''': [[2_5_0_0_0_4, 1_1_0_6_2, 8_2_7_7_2, 7, 1_5, 8_2_7_7_2, 5_3_8, 5_1_5_2_9, 2_3_7, 1_7_1_9_8, 1_2_9_0, 2_0_6, 9, 2_1_5_1_7_5, 1_3_1_4, 1_3_6, 1_7_1_9_8, 1_2_9_0, 2_0_6, 9, 5_6_3_5_9, 4_2, 1_2_2_0_0_9, 9, 1_6_4_6_6, 1_6, 8_7_3_4_4, 4_5_3_7, 9, 4_7_1_7, 7_8_3_8_1, 6, 1_5_9_9_5_8, 7, 1_5, 2_4_4_8_0, 6_1_8, 4, 5_2_7, 2_2_6_9_3, 5_4_2_8, 4, 2_7_7_7, 2_4_4_8_0, 9_8_7_4, 4, 4_3_5_2_3, 5_9_4, 4, 8_0_3, 1_8_3_9_2, 3_3_1_8_9, 1_8, 4, 4_3_5_2_3, 2_4_4_4_7, 1_2_3_9_9, 1_0_0, 2_4_9_5_5, 8_3_6_5_8, 9_6_2_6, 1_4_4_0_5_7, 1_5, 8_3_9, 2_2_3_3_5, 1_6, 1_3_6, 2_4_9_5_5, 8_3_6_5_8, 8_3_4_7_9, 1_5, 3_9_1_0_2, 7_2_4, 1_6, 6_7_8, 6_4_5, 2_7_8_9, 1_3_2_8, 4_5_8_9, 4_2, 1_2_2_0_0_9, 1_1_5_7_7_4, 2_3, 8_0_5, 1_3_2_8, 4_6_8_7_6, 7, 1_3_6, 5_3_8_9_4, 1_9_4_0, 4_2_2_2_7, 4_1_1_5_9, 1_7_7_2_1, 8_2_3, 4_2_5, 4, 2_7_5_1_2, 9_8_7_2_2, 2_0_6, 1_3_6, 5_5_3_1, 4_9_7_0, 9_1_9, 1_7_3_3_6, 5, 2], [2_5_0_0_0_4, 2_0_0_8_0, 6_1_8, 8_3, 8_2_7_7_5, 4_7, 4_7_9, 9, 1_5_1_7, 7_3, 5_3_8_9_4, 3_3_3, 8_0_5_8_1, 1_1_0_1_1_7, 1_8_8_1_1, 5_2_5_6, 1_2_9_5, 5_1, 1_5_2_5_2_6, 2_9_7, 7_9_8_6, 3_9_0, 1_2_4_4_1_6, 5_3_8, 3_5_4_3_1, 2_1_4, 9_8, 1_5_0_4_4, 2_5_7_3_7, 1_3_6, 7_1_0_8, 4_3_7_0_1, 2_3, 7_5_6, 1_3_5_3_5_5, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [2_5_0_0_0_4, 5_8_1, 6_3_7_7_3, 1_1_9_4_5_5, 6, 1_4_7_7_9_7, 8_8_2_0_3, 7, 6_4_5, 7_0, 2_1, 3_2_8_5, 1_0_2_6_9, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=_lowerCamelCase,
            model_name="facebook/mbart-large-50",
            revision="d3913889c59cd5c9e456b269c376325eabad57e2",
        )
    def test_save_pretrained(self):
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return

        self.tokenizers_list[0] = (self.rust_tokenizer_class, "hf-internal-testing/tiny-random-mbart50", {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f)
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=True)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=False)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it saved the tokenizer.json file
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)
@require_torch
@require_sentencepiece
@require_tokenizers
class MBart50OneToManyIntegrationTest(unittest.TestCase):
    checkpoint_name = "facebook/mbart-large-50-one-to-many-mmt"
    src_text = [
        " UN Chief Says There Is No Military Solution in Syria",
        " Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.",
    ]
    tgt_text = [
        "Şeful ONU declară că nu există o soluţie militară în Siria",
        "Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"
        ' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'
        " face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
    ]
    expected_src_tokens = [EN_CODE, 8274, 127873, 25916, 7, 8622, 2071, 438, 67485, 53, 187895, 23, 51712, 2]

    @classmethod
    def setUpClass(cls):
        cls.tokenizer: MBart50Tokenizer = MBart50Tokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="en_XX", tgt_lang="ro_RO"
        )
        cls.pad_token_id = 1
        return cls
    def test_language_codes(self):
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ar_AR"], 250001)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["en_EN"], 250004)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ro_RO"], 250020)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["mr_IN"], 250038)
    def test_tokenizer_batch_encode_plus(self):
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)
    def test_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(RO_CODE, self.tokenizer.all_special_ids)
        generated_ids = [RO_CODE, 884, 9019, 96, 9, 916, 86792, 36, 18743, 15596, 5, 2]
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_romanian = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_romanian)
        self.assertNotIn(self.tokenizer.eos_token, result)
    def test_tokenizer_truncation(self):
        src_text = ["this is gunna be a long sentence " * 20]
        assert isinstance(src_text[0], str)
        desired_max_length = 10
        ids = self.tokenizer(src_text, max_length=desired_max_length, truncation=True).input_ids[0]
        self.assertEqual(ids[0], EN_CODE)
        self.assertEqual(ids[-1], 2)
        self.assertEqual(len(ids), desired_max_length)
    def test_mask_token(self):
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "ar_AR"]), [250053, 250001])
    def test_special_tokens_unaffacted_by_save_load(self):
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = MBart50Tokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids, original_special_tokens)
@require_torch
    def test_batch_fairseq_parity(self):
        batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors="pt")
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)

        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        assert batch.input_ids[1][0] == EN_CODE
        assert batch.input_ids[1][-1] == 2
        assert batch.labels[1][0] == RO_CODE
        assert batch.labels[1][-1] == 2
        assert batch.decoder_input_ids[1][:2].tolist() == [2, RO_CODE]
@require_torch
    def test_tokenizer_prepare_batch(self):
        batch = self.tokenizer(
            self.src_text,
            text_target=self.tgt_text,
            padding=True,
            truncation=True,
            max_length=len(self.expected_src_tokens),
            return_tensors="pt",
        )
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)

        self.assertIsInstance(batch, BatchEncoding)

        self.assertEqual((2, 14), batch.input_ids.shape)
        self.assertEqual((2, 14), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens, result)
        self.assertEqual(2, batch.decoder_input_ids[0, 0])  # decoder_start_token_id
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens, [EN_CODE])
        self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])
    def test_seq2seq_max_target_length(self):
        batch = self.tokenizer(self.src_text, padding=True, truncation=True, max_length=3, return_tensors="pt")
        targets = self.tokenizer(
            text_target=self.tgt_text, padding=True, truncation=True, max_length=10, return_tensors="pt"
        )
        labels = targets["input_ids"]
        batch["decoder_input_ids"] = shift_tokens_right(labels, self.tokenizer.pad_token_id)

        self.assertEqual(batch.input_ids.shape[1], 3)
        self.assertEqual(batch.decoder_input_ids.shape[1], 10)
@require_torch
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs(
            "A test", return_tensors="pt", src_lang="en_XX", tgt_lang="ar_AR"
        )

        self.assertEqual(
            nested_simplify(inputs),
            {
                # en_XX, A, test, EOS
                "input_ids": [[250004, 62, 3034, 2]],
                "attention_mask": [[1, 1, 1, 1]],
                # ar_AR
                "forced_bos_token_id": 250001,
            },
        )
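# --- Added note: a short sketch tying the language codes above to actual
# translation with the same checkpoint (mirrors the model card usage;
# illustrative only, downloads the full model):
#
#   from transformers import MBart50TokenizerFast, MBartForConditionalGeneration
#
#   tokenizer = MBart50TokenizerFast.from_pretrained("facebook/mbart-large-50-one-to-many-mmt", src_lang="en_XX")
#   model = MBartForConditionalGeneration.from_pretrained("facebook/mbart-large-50-one-to-many-mmt")
#
#   inputs = tokenizer(" UN Chief Says There Is No Military Solution in Syria", return_tensors="pt")
#   generated = model.generate(**inputs, forced_bos_token_id=tokenizer.lang_code_to_id["ro_RO"])
#   print(tokenizer.batch_decode(generated, skip_special_tokens=True))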
| 72 |
from functools import lru_cache
@lru_cache  # the bare (unparenthesised) decorator form requires Python >= 3.8
def factorial(num: int) -> int:
    """
    >>> factorial(5)
    120
    >>> factorial(0)
    1
    """
    if num < 0:
        raise ValueError("Number should not be negative.")

    return 1 if num in (0, 1) else num * factorial(num - 1)
if __name__ == "__main__":
import doctest
doctest.testmod()
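# --- Added demo (not in the original file): lru_cache memoises every
# intermediate factorial(k), so overlapping calls become cache hits.
print(factorial(10))           # 3628800
print(factorial(12))           # reuses the cached factorial(10)
print(factorial.cache_info())  # CacheInfo(hits=..., misses=..., ...)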
| 278 | 0 |
'''simple docstring'''
import unittest
import torch
from diffusers import DDIMScheduler, DDPMScheduler, UNet2DModel
from diffusers.training_utils import set_seed
from diffusers.utils.testing_utils import slow
torch.backends.cuda.matmul.allow_tf32 = False


class TrainingTests(unittest.TestCase):
    def get_model_optimizer(self, resolution=32):
        set_seed(0)
        model = UNet2DModel(sample_size=resolution, in_channels=3, out_channels=3)
        optimizer = torch.optim.SGD(model.parameters(), lr=0.0001)
        return model, optimizer
    @slow
    def test_training_step_equality(self):
        device = "cpu"  # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable
        ddpm_scheduler = DDPMScheduler(
            num_train_timesteps=1000,
            beta_start=0.0001,
            beta_end=0.02,
            beta_schedule="linear",
            clip_sample=True,
        )
        ddim_scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_start=0.0001,
            beta_end=0.02,
            beta_schedule="linear",
            clip_sample=True,
        )

        assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps

        # shared batches for DDPM and DDIM
        set_seed(0)
        clean_images = [torch.randn((4, 3, 32, 32)).clip(-1, 1).to(device) for _ in range(4)]
        noise = [torch.randn((4, 3, 32, 32)).to(device) for _ in range(4)]
        timesteps = [torch.randint(0, 1000, (4,)).long().to(device) for _ in range(4)]

        # train with a DDPM scheduler
        model, optimizer = self.get_model_optimizer(resolution=32)
        model.train().to(device)
        for i in range(4):
            optimizer.zero_grad()
            ddpm_noisy_images = ddpm_scheduler.add_noise(clean_images[i], noise[i], timesteps[i])
            ddpm_noise_pred = model(ddpm_noisy_images, timesteps[i]).sample
            loss = torch.nn.functional.mse_loss(ddpm_noise_pred, noise[i])
            loss.backward()
            optimizer.step()
        del model, optimizer

        # recreate the model and optimizer, and retry with DDIM
        model, optimizer = self.get_model_optimizer(resolution=32)
        model.train().to(device)
        for i in range(4):
            optimizer.zero_grad()
            ddim_noisy_images = ddim_scheduler.add_noise(clean_images[i], noise[i], timesteps[i])
            ddim_noise_pred = model(ddim_noisy_images, timesteps[i]).sample
            loss = torch.nn.functional.mse_loss(ddim_noise_pred, noise[i])
            loss.backward()
            optimizer.step()
        del model, optimizer

        self.assertTrue(torch.allclose(ddpm_noisy_images, ddim_noisy_images, atol=1e-5))
        self.assertTrue(torch.allclose(ddpm_noise_pred, ddim_noise_pred, atol=1e-5))
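# --- Added note: why the final assertions hold. For the same beta schedule,
# DDPM and DDIM share the identical forward (noising) process
# x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * eps, so add_noise
# agrees, and identically seeded models then produce identical predictions.
# A minimal check of the first half of that claim:
import torch
from diffusers import DDIMScheduler, DDPMScheduler

ddpm = DDPMScheduler(num_train_timesteps=1000, beta_schedule="linear")
ddim = DDIMScheduler(num_train_timesteps=1000, beta_schedule="linear")
x0, eps = torch.randn(1, 3, 8, 8), torch.randn(1, 3, 8, 8)
t = torch.tensor([10])
assert torch.allclose(ddpm.add_noise(x0, eps, t), ddim.add_noise(x0, eps, t))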
| 264 |
def wave(txt: str) -> list:
    """
    Returns a so-called 'wave' of a given string: one copy per alphabetic
    character, with that character capitalised.
    >>> wave('cat')
    ['Cat', 'cAt', 'caT']
    """
    return [
        txt[:a] + txt[a].upper() + txt[a + 1 :]
        for a in range(len(txt))
        if txt[a].isalpha()
    ]


if __name__ == "__main__":
    __import__("doctest").testmod()
| 264 | 1 |
import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional
import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset
from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser
logger = logging.getLogger(__name__)
torch.set_grad_enabled(False)
device = "cuda" if torch.cuda.is_available() else "cpu"


def split_text(text: str, n: int = 100, character: str = " ") -> List[str]:
    """Split the text every ``n``-th occurrence of ``character``"""
    text = text.split(character)
    return [character.join(text[i : i + n]).strip() for i in range(0, len(text), n)]


def split_documents(documents: dict) -> dict:
    """Split documents into passages"""
    titles, texts = [], []
    for title, text in zip(documents["title"], documents["text"]):
        if text is not None:
            for passage in split_text(text):
                titles.append(title if title is not None else "")
                texts.append(passage)
    return {"title": titles, "text": texts}


def embed(documents: dict, ctx_encoder: DPRContextEncoder, ctx_tokenizer: DPRContextEncoderTokenizerFast) -> dict:
    """Compute the DPR embeddings of document passages"""
    input_ids = ctx_tokenizer(
        documents["title"], documents["text"], truncation=True, padding="longest", return_tensors="pt"
    )["input_ids"]
    embeddings = ctx_encoder(input_ids.to(device=device), return_dict=True).pooler_output
    return {"embeddings": embeddings.detach().cpu().numpy()}
def main(
    rag_example_args: "RagExampleArguments",
    processing_args: "ProcessingArguments",
    index_hnsw_args: "IndexHnswArguments",
):
    ######################################
    logger.info("Step 1 - Create the dataset")
    ######################################

    # The dataset needed for RAG must have three columns:
    # - title (string): title of the document
    # - text (string): text of a passage of the document
    # - embeddings (array of dimension d): DPR representation of the passage
    # Let's say you have documents in tab-separated csv files with columns "title" and "text"
    assert os.path.isfile(rag_example_args.csv_path), "Please provide a valid path to a csv file"

    # You can load a Dataset object this way
    dataset = load_dataset(
        "csv", data_files=[rag_example_args.csv_path], split="train", delimiter="\t", column_names=["title", "text"]
    )
    # More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files

    # Then split the documents into passages of 100 words
    dataset = dataset.map(split_documents, batched=True, num_proc=processing_args.num_proc)

    # And compute the embeddings
    ctx_encoder = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name).to(device=device)
    ctx_tokenizer = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name)
    new_features = Features(
        {"text": Value("string"), "title": Value("string"), "embeddings": Sequence(Value("float32"))}
    )  # optional, save as float32 instead of float64 to save space
    dataset = dataset.map(
        partial(embed, ctx_encoder=ctx_encoder, ctx_tokenizer=ctx_tokenizer),
        batched=True,
        batch_size=processing_args.batch_size,
        features=new_features,
    )

    # And finally save your dataset
    passages_path = os.path.join(rag_example_args.output_dir, "my_knowledge_dataset")
    dataset.save_to_disk(passages_path)
    # from datasets import load_from_disk
    # dataset = load_from_disk(passages_path)  # to reload the dataset

    ######################################
    logger.info("Step 2 - Index the dataset")
    ######################################

    # Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
    index = faiss.IndexHNSWFlat(index_hnsw_args.d, index_hnsw_args.m, faiss.METRIC_INNER_PRODUCT)
    dataset.add_faiss_index("embeddings", custom_index=index)

    # And save the index
    index_path = os.path.join(rag_example_args.output_dir, "my_knowledge_dataset_hnsw_index.faiss")
    dataset.get_index("embeddings").save(index_path)
    # dataset.load_faiss_index("embeddings", index_path)  # to reload the index
@dataclass
class RagExampleArguments:
    csv_path: str = field(
        default=str(Path(__file__).parent / "test_run" / "dummy-kb" / "my_knowledge_dataset.csv"),
        metadata={"help": "Path to a tab-separated csv file with columns 'title' and 'text'"},
    )
    question: Optional[str] = field(
        default=None,
        metadata={"help": "Question that is passed as input to RAG. Default is 'What does Moses' rod turn into ?'."},
    )
    rag_model_name: str = field(
        default="facebook/rag-sequence-nq",
        metadata={"help": "The RAG model to use. Either 'facebook/rag-sequence-nq' or 'facebook/rag-token-nq'"},
    )
    dpr_ctx_encoder_model_name: str = field(
        default="facebook/dpr-ctx_encoder-multiset-base",
        metadata={
            "help": (
                "The DPR context encoder model to use. Either 'facebook/dpr-ctx_encoder-single-nq-base' or"
                " 'facebook/dpr-ctx_encoder-multiset-base'"
            )
        },
    )
    output_dir: Optional[str] = field(
        default=str(Path(__file__).parent / "test_run" / "dummy-kb"),
        metadata={"help": "Path to a directory where the dataset passages and the index will be saved"},
    )


@dataclass
class ProcessingArguments:
    num_proc: Optional[int] = field(
        default=None,
        metadata={
            "help": "The number of processes to use to split the documents into passages. Default is single process."
        },
    )
    batch_size: int = field(
        default=16,
        metadata={
            "help": "The batch size to use when computing the passages embeddings using the DPR context encoder."
        },
    )


@dataclass
class IndexHnswArguments:
    d: int = field(
        default=768,
        metadata={"help": "The dimension of the embeddings to pass to the HNSW Faiss index."},
    )
    m: int = field(
        default=128,
        metadata={
            "help": (
                "The number of bi-directional links created for every new element during the HNSW index construction."
            )
        },
    )
if __name__ == "__main__":
logging.basicConfig(level=logging.WARNING)
logger.setLevel(logging.INFO)
UpperCamelCase_ = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
UpperCamelCase_ ,UpperCamelCase_ ,UpperCamelCase_ = parser.parse_args_into_dataclasses()
with TemporaryDirectory() as tmp_dir:
UpperCamelCase_ = rag_example_args.output_dir or tmp_dir
main(rag_example_args, processing_args, index_hnsw_args)
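# --- Added note: a follow-on sketch for querying the saved index with a DPR
# *question* encoder. Paths are placeholders and the model id follows the DPR
# family used above; treat this as illustrative, not part of the script.
#
#   from datasets import load_from_disk
#   from transformers import DPRQuestionEncoder, DPRQuestionEncoderTokenizerFast
#
#   dataset = load_from_disk("my_knowledge_dataset")
#   dataset.load_faiss_index("embeddings", "my_knowledge_dataset_hnsw_index.faiss")
#
#   q_encoder = DPRQuestionEncoder.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
#   q_tokenizer = DPRQuestionEncoderTokenizerFast.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
#
#   question = "What does Moses' rod turn into ?"
#   question_emb = q_encoder(**q_tokenizer(question, return_tensors="pt"))[0][0].detach().numpy()
#   scores, retrieved = dataset.get_nearest_examples("embeddings", question_emb, k=5)
#   print(retrieved["title"])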
| 345 |
import pickle
import numpy as np
from matplotlib import pyplot as plt
class CNN:
    def __init__(self, conv1_get, size_p1, bp_num1, bp_num2, bp_num3, rate_w=0.2, rate_t=0.2):
        """
        :param conv1_get: [a, c, d]: size, number, step of convolution kernel
        :param size_p1: pooling size
        :param bp_num1: units number of flatten layer
        :param bp_num2: units number of hidden layer
        :param bp_num3: units number of output layer
        :param rate_w: rate of weight learning
        :param rate_t: rate of threshold learning
        """
        self.num_bp1 = bp_num1
        self.num_bp2 = bp_num2
        self.num_bp3 = bp_num3
        self.conv1 = conv1_get[:2]
        self.step_conv1 = conv1_get[2]
        self.size_pooling1 = size_p1
        self.rate_weight = rate_w
        self.rate_thre = rate_t
        self.w_conv1 = [
            np.mat(-1 * np.random.rand(self.conv1[0], self.conv1[0]) + 0.5)
            for i in range(self.conv1[1])
        ]
        self.wkj = np.mat(-1 * np.random.rand(self.num_bp3, self.num_bp2) + 0.5)
        self.vji = np.mat(-1 * np.random.rand(self.num_bp2, self.num_bp1) + 0.5)
        self.thre_conv1 = -2 * np.random.rand(self.conv1[1]) + 1
        self.thre_bp2 = -2 * np.random.rand(self.num_bp2) + 1
        self.thre_bp3 = -2 * np.random.rand(self.num_bp3) + 1
    def save_model(self, save_path):
        # save model dict with pickle
        model_dic = {
            "num_bp1": self.num_bp1,
            "num_bp2": self.num_bp2,
            "num_bp3": self.num_bp3,
            "conv1": self.conv1,
            "step_conv1": self.step_conv1,
            "size_pooling1": self.size_pooling1,
            "rate_weight": self.rate_weight,
            "rate_thre": self.rate_thre,
            "w_conv1": self.w_conv1,
            "wkj": self.wkj,
            "vji": self.vji,
            "thre_conv1": self.thre_conv1,
            "thre_bp2": self.thre_bp2,
            "thre_bp3": self.thre_bp3,
        }
        with open(save_path, "wb") as f:
            pickle.dump(model_dic, f)

        print(f"Model saved: {save_path}")
    @classmethod
    def read_model(cls, model_path):
        # read saved model
        with open(model_path, "rb") as f:
            model_dic = pickle.load(f)  # noqa: S301

        conv_get = model_dic.get("conv1")
        conv_get.append(model_dic.get("step_conv1"))
        size_p1 = model_dic.get("size_pooling1")
        bp1 = model_dic.get("num_bp1")
        bp2 = model_dic.get("num_bp2")
        bp3 = model_dic.get("num_bp3")
        r_w = model_dic.get("rate_weight")
        r_t = model_dic.get("rate_thre")
        # create model instance
        conv_ins = CNN(conv_get, size_p1, bp1, bp2, bp3, r_w, r_t)
        # modify model parameter
        conv_ins.w_conv1 = model_dic.get("w_conv1")
        conv_ins.wkj = model_dic.get("wkj")
        conv_ins.vji = model_dic.get("vji")
        conv_ins.thre_conv1 = model_dic.get("thre_conv1")
        conv_ins.thre_bp2 = model_dic.get("thre_bp2")
        conv_ins.thre_bp3 = model_dic.get("thre_bp3")
        return conv_ins
    def sig(self, x):
        return 1 / (1 + np.exp(-1 * x))

    def do_round(self, x):
        return round(x, 3)
    def convolute(self, data, convs, w_convs, thre_convs, conv_step):
        # convolution process
        size_conv = convs[0]
        num_conv = convs[1]
        size_data = np.shape(data)[0]
        # get the data slice of original image data, data_focus
        data_focus = []
        for i_focus in range(0, size_data - size_conv + 1, conv_step):
            for j_focus in range(0, size_data - size_conv + 1, conv_step):
                focus = data[
                    i_focus : i_focus + size_conv, j_focus : j_focus + size_conv
                ]
                data_focus.append(focus)
        # calculate the feature map of every single kernel, and saved as list of matrix
        data_featuremap = []
        size_feature_map = int((size_data - size_conv) / conv_step + 1)
        for i_map in range(num_conv):
            featuremap = []
            for i_focus in range(len(data_focus)):
                net_focus = (
                    np.sum(np.multiply(data_focus[i_focus], w_convs[i_map]))
                    - thre_convs[i_map]
                )
                featuremap.append(self.sig(net_focus))
            featuremap = np.asmatrix(featuremap).reshape(
                size_feature_map, size_feature_map
            )
            data_featuremap.append(featuremap)

        # expanding the data slice to one dimension
        focus1_list = []
        for each_focus in data_focus:
            focus1_list.extend(self._expand_mat(each_focus))
        focus_list = np.asarray(focus1_list)
        return focus_list, data_featuremap
    def pooling(self, featuremaps, size_pooling, pooling_type="average_pool"):
        # pooling process
        size_map = len(featuremaps[0])
        size_pooled = int(size_map / size_pooling)
        featuremap_pooled = []
        for i_map in range(len(featuremaps)):
            feature_map = featuremaps[i_map]
            map_pooled = []
            for i_focus in range(0, size_map, size_pooling):
                for j_focus in range(0, size_map, size_pooling):
                    focus = feature_map[
                        i_focus : i_focus + size_pooling,
                        j_focus : j_focus + size_pooling,
                    ]
                    if pooling_type == "average_pool":
                        # average pooling
                        map_pooled.append(np.average(focus))
                    elif pooling_type == "max_pooling":
                        # max pooling
                        map_pooled.append(np.max(focus))
            map_pooled = np.asmatrix(map_pooled).reshape(size_pooled, size_pooled)
            featuremap_pooled.append(map_pooled)
        return featuremap_pooled
    def _expand(self, data):
        # expanding three dimension data to one dimension list
        data_expanded = []
        for i in range(len(data)):
            shapes = np.shape(data[i])
            data_listed = data[i].reshape(1, shapes[0] * shapes[1])
            data_listed = data_listed.getA().tolist()[0]
            data_expanded.extend(data_listed)
        data_expanded = np.asarray(data_expanded)
        return data_expanded

    def _expand_mat(self, data_mat):
        # expanding matrix to one dimension list
        data_mat = np.asarray(data_mat)
        shapes = np.shape(data_mat)
        data_expanded = data_mat.reshape(1, shapes[0] * shapes[1])
        return data_expanded
    def _calculate_gradient_from_pool(self, out_map, pd_pool, num_map, size_map, size_pooling):
        """
        calculate the gradient from the data slice of the pool layer
        pd_pool: list of matrix
        out_map: the shape of data slice (size_map * size_map)
        return: pd_all: list of matrix, [num, size_map, size_map]
        """
        pd_all = []
        i_pool = 0
        for i_map in range(num_map):
            pd_conv1 = np.ones((size_map, size_map))
            for i in range(0, size_map, size_pooling):
                for j in range(0, size_map, size_pooling):
                    pd_conv1[i : i + size_pooling, j : j + size_pooling] = pd_pool[
                        i_pool
                    ]
                    i_pool = i_pool + 1
            pd_conv2 = np.multiply(
                pd_conv1, np.multiply(out_map[i_map], (1 - out_map[i_map]))
            )
            pd_all.append(pd_conv2)
        return pd_all
    def train(self, patterns, datas_train, datas_teach, n_repeat, error_accuracy, draw_e=bool):
        # model training
        print("----------------------Start Training-------------------------")
        print(" - - Shape: Train_Data ", np.shape(datas_train))
        print(" - - Shape: Teach_Data ", np.shape(datas_teach))
        rp = 0
        all_mse = []
        mse = 10000
        while rp < n_repeat and mse >= error_accuracy:
            error_count = 0
            print(f"-------------Learning Time {rp}--------------")
            for p in range(len(datas_train)):
                # print('------------Learning Image: %d--------------'%p)
                data_train = np.asmatrix(datas_train[p])
                data_teach = np.asarray(datas_teach[p])
                data_focus1, data_conved1 = self.convolute(
                    data_train,
                    self.conv1,
                    self.w_conv1,
                    self.thre_conv1,
                    conv_step=self.step_conv1,
                )
                data_pooled1 = self.pooling(data_conved1, self.size_pooling1)
                shape_featuremap1 = np.shape(data_conved1)
                data_bp_input = self._expand(data_pooled1)
                bp_out1 = data_bp_input

                bp_net_j = np.dot(bp_out1, self.vji.T) - self.thre_bp2
                bp_out2 = self.sig(bp_net_j)
                bp_net_k = np.dot(bp_out2, self.wkj.T) - self.thre_bp3
                bp_out3 = self.sig(bp_net_k)

                # --------------Model Learning ------------------------
                # calculate error and gradient---------------
                pd_k_all = np.multiply(
                    (data_teach - bp_out3), np.multiply(bp_out3, (1 - bp_out3))
                )
                pd_j_all = np.multiply(
                    np.dot(pd_k_all, self.wkj), np.multiply(bp_out2, (1 - bp_out2))
                )
                pd_i_all = np.dot(pd_j_all, self.vji)

                pd_conv1_pooled = pd_i_all / (self.size_pooling1 * self.size_pooling1)
                pd_conv1_pooled = pd_conv1_pooled.T.getA().tolist()
                pd_conv1_all = self._calculate_gradient_from_pool(
                    data_conved1,
                    pd_conv1_pooled,
                    shape_featuremap1[0],
                    shape_featuremap1[1],
                    self.size_pooling1,
                )
                # weight and threshold learning process---------
                # convolution layer
                for k_conv in range(self.conv1[1]):
                    pd_conv_list = self._expand_mat(pd_conv1_all[k_conv])
                    delta_w = self.rate_weight * np.dot(pd_conv_list, data_focus1)

                    self.w_conv1[k_conv] = self.w_conv1[k_conv] + delta_w.reshape(
                        (self.conv1[0], self.conv1[0])
                    )

                    self.thre_conv1[k_conv] = (
                        self.thre_conv1[k_conv]
                        - np.sum(pd_conv1_all[k_conv]) * self.rate_thre
                    )
                # all connected layer
                self.wkj = self.wkj + pd_k_all.T * bp_out2 * self.rate_weight
                self.vji = self.vji + pd_j_all.T * bp_out1 * self.rate_weight
                self.thre_bp3 = self.thre_bp3 - pd_k_all * self.rate_thre
                self.thre_bp2 = self.thre_bp2 - pd_j_all * self.rate_thre
                # calculate the sum error of all single image
                errors = np.sum(abs(data_teach - bp_out3))
                error_count += errors
                # print('   ----Teach      ',data_teach)
                # print('   ----BP_output  ',bp_out3)
            rp = rp + 1
            mse = error_count / patterns
            all_mse.append(mse)

        def draw_error():
            yplot = [error_accuracy for i in range(int(n_repeat * 1.2))]
            plt.plot(all_mse, "+-")
            plt.plot(yplot, "r--")
            plt.xlabel("Learning Times")
            plt.ylabel("All_mse")
            plt.grid(True, alpha=0.5)
            plt.show()

        print("------------------Training Complished---------------------")
        print(" - - Training epoch: ", rp, f" - - Mse: {mse:.6f}")
        if draw_e:
            draw_error()
        return mse
def predict( self ,datas_test ) -> Tuple:
# model prediction
produce_out = []
print("""-------------------Start Testing-------------------------""" )
print((""" - - Shape: Test_Data """, np.shape(datas_test )) )
for p in range(len(datas_test ) ):
data_test = np.asmatrix(datas_test[p] )
data_focusa , data_conveda = self.convolute(
data_test ,self.conva ,self.w_conva ,self.thre_conva ,conv_step=self.step_conva ,)
data_pooleda = self.pooling(data_conveda ,self.size_poolinga )
data_bp_input = self._expand(data_pooleda )
bp_out1 = data_bp_input
bp_out2 = self.sig(bp_out1 * self.vji.T - self.thre_bpa )
bp_out3 = self.sig(bp_out2 * self.wkj.T - self.thre_bpa )
produce_out.extend(bp_out3.getA().tolist() )
res = [list(map(self.do_round ,each ) ) for each in produce_out]
return np.asarray(res )
def convolution( self ,data ) -> Tuple:
# return the image data after the convolution process so it can be inspected
data_test = np.asmatrix(data )
data_focusa , data_conveda = self.convolute(
data_test ,self.conva ,self.w_conva ,self.thre_conva ,conv_step=self.step_conva ,)
data_pooleda = self.pooling(data_conveda ,self.size_poolinga )
return data_conveda, data_pooleda
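# A minimal standalone sketch (added for illustration; assumes numpy is imported as np
# at the top of this file) of the delta rule the training loop applies at the output
# layer: the error is scaled by the sigmoid derivative out * (1 - out).
teach_demo = np.asarray([1.0, 0.0] )
out_demo = np.asarray([0.8, 0.3] )
pd_demo = np.multiply(teach_demo - out_demo ,np.multiply(out_demo ,(1 - out_demo) ) )  # gradient w.r.t. the net input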
if __name__ == "__main__":
pass
| 345 | 1 |
"""simple docstring"""
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class TreeNode:
"""Binary tree node that carries a number of coins."""
data: int = 0
left: TreeNode | None = None
right: TreeNode | None = None
CoinsDistribResult = namedtuple("CoinsDistribResult", "moves excess")
def distribute_coins( root: TreeNode | None ) -> int:
"""simple docstring"""
if root is None:
return 0
# Validation
def count_nodes( node: TreeNode | None ) -> int:
if node is None:
return 0
return count_nodes(node.left ) + count_nodes(node.right ) + 1
def count_coins( node: TreeNode | None ) -> int:
if node is None:
return 0
return count_coins(node.left ) + count_coins(node.right ) + node.data
if count_nodes(root ) != count_coins(root ):
raise ValueError("""The number of nodes should be the same as the number of coins""" )
# Main calculation
def get_distrib( node: TreeNode | None ) -> CoinsDistribResult:
if node is None:
return CoinsDistribResult(0 , 1 )
left_distrib_moves , left_distrib_excess = get_distrib(node.left )
right_distrib_moves , right_distrib_excess = get_distrib(node.right )
coins_to_left = 1 - left_distrib_excess
coins_to_right = 1 - right_distrib_excess
coins_distrib_moves = (
left_distrib_moves
+ right_distrib_moves
+ abs(coins_to_left )
+ abs(coins_to_right )
)
coins_distrib_excess = node.data - coins_to_left - coins_to_right
return CoinsDistribResult(coins_distrib_moves , coins_distrib_excess )
return get_distrib(root )[0]
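# Worked example (added for illustration): distributing the coins in the tree [3, 0, 0]
# takes two moves, one from the root to each empty leaf.
example_root = TreeNode(3 , TreeNode(0 ) , TreeNode(0 ) )
assert distribute_coins(example_root ) == 2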
if __name__ == "__main__":
import doctest
doctest.testmod()
| 364 |
"""simple docstring"""
def is_balanced( s: str ) -> bool:
"""simple docstring"""
stack = []
open_brackets = set({"""(""", """[""", """{"""} )
closed_brackets = set({""")""", """]""", """}"""} )
open_to_closed = {"""{""": """}""", """[""": """]""", """(""": """)"""}
for i in range(len(s ) ):
if s[i] in open_brackets:
stack.append(s[i] )
elif s[i] in closed_brackets and (
len(stack ) == 0 or (len(stack ) > 0 and open_to_closed[stack.pop()] != s[i])
):
return False
return len(stack ) == 0
def main():
"""simple docstring"""
s = input("""Enter sequence of brackets: """ )
if is_balanced(s ):
print(s , """is balanced""" )
else:
print(s , """is not balanced""" )
if __name__ == "__main__":
main()
| 77 | 0 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_electra import ElectraTokenizer
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'google/electra-small-generator': (
'https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt'
),
'google/electra-base-generator': 'https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt',
'google/electra-large-generator': (
'https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt'
),
'google/electra-small-discriminator': (
'https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt'
),
'google/electra-base-discriminator': (
'https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt'
),
'google/electra-large-discriminator': (
'https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'google/electra-small-generator': (
'https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json'
),
'google/electra-base-generator': (
'https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json'
),
'google/electra-large-generator': (
'https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json'
),
'google/electra-small-discriminator': (
'https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json'
),
'google/electra-base-discriminator': (
'https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json'
),
'google/electra-large-discriminator': (
'https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'google/electra-small-generator': 5_12,
'google/electra-base-generator': 5_12,
'google/electra-large-generator': 5_12,
'google/electra-small-discriminator': 5_12,
'google/electra-base-discriminator': 5_12,
'google/electra-large-discriminator': 5_12,
}
PRETRAINED_INIT_CONFIGURATION = {
'google/electra-small-generator': {'do_lower_case': True},
'google/electra-base-generator': {'do_lower_case': True},
'google/electra-large-generator': {'do_lower_case': True},
'google/electra-small-discriminator': {'do_lower_case': True},
'google/electra-base-discriminator': {'do_lower_case': True},
'google/electra-large-discriminator': {'do_lower_case': True},
}
class ElectraTokenizerFast( PreTrainedTokenizerFast ):
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
slow_tokenizer_class = ElectraTokenizer
def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , tokenize_chinese_chars=True , strip_accents=None , **kwargs , ):
"""simple docstring"""
super().__init__(
vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )
normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' , do_lower_case ) != do_lower_case
or normalizer_state.get('strip_accents' , strip_accents ) != strip_accents
or normalizer_state.get('handle_chinese_chars' , tokenize_chinese_chars ) != tokenize_chinese_chars
):
normalizer_class = getattr(normalizers , normalizer_state.pop('type' ) )
normalizer_state['lowercase'] = do_lower_case
normalizer_state['strip_accents'] = strip_accents
normalizer_state['handle_chinese_chars'] = tokenize_chinese_chars
self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
self.do_lower_case = do_lower_case
def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ):
"""simple docstring"""
output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
if token_ids_1:
output += token_ids_1 + [self.sep_token_id]
return output
def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ):
"""simple docstring"""
sep = [self.sep_token_id]
cls = [self.cls_token_id]
if token_ids_1 is None:
return len(cls + token_ids_0 + sep ) * [0]
return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
def save_vocabulary( self , save_directory , filename_prefix = None ):
"""simple docstring"""
files = self._tokenizer.model.save(save_directory , name=filename_prefix )
return tuple(files )
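# Usage sketch (added for illustration; downloading the checkpoint requires access
# to the Hugging Face Hub):
# tokenizer = ElectraTokenizerFast.from_pretrained("google/electra-small-discriminator")
# tokenizer("electra shares the bert wordpiece vocabulary")["input_ids"]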
| 253 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'configuration_layoutlmv2': ['LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LayoutLMv2Config'],
'processing_layoutlmv2': ['LayoutLMv2Processor'],
'tokenization_layoutlmv2': ['LayoutLMv2Tokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['tokenization_layoutlmv2_fast'] = ['LayoutLMv2TokenizerFast']
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['feature_extraction_layoutlmv2'] = ['LayoutLMv2FeatureExtractor']
_import_structure['image_processing_layoutlmv2'] = ['LayoutLMv2ImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_layoutlmv2'] = [
'LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST',
'LayoutLMv2ForQuestionAnswering',
'LayoutLMv2ForSequenceClassification',
'LayoutLMv2ForTokenClassification',
'LayoutLMv2Layer',
'LayoutLMv2Model',
'LayoutLMv2PreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_layoutlmv2 import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMv2Config
from .processing_layoutlmv2 import LayoutLMv2Processor
from .tokenization_layoutlmv2 import LayoutLMv2Tokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutlmv2_fast import LayoutLMv2TokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_layoutlmv2 import LayoutLMv2FeatureExtractor
from .image_processing_layoutlmv2 import LayoutLMv2ImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_layoutlmv2 import (
LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST,
LayoutLMv2ForQuestionAnswering,
LayoutLMv2ForSequenceClassification,
LayoutLMv2ForTokenClassification,
LayoutLMv2Layer,
LayoutLMv2Model,
LayoutLMv2PreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
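# Toy illustration (added; this is not the real _LazyModule): the pattern above
# defers heavy submodule imports until an exported name is first accessed.
import importlib
import types

class _DemoLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr):
        # Resolve the submodule that exports `attr`, import it on demand.
        for submodule, names in self._import_structure.items():
            if attr in names:
                module = importlib.import_module("." + submodule, self.__name__)
                return getattr(module, attr)
        raise AttributeError(attr)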
| 253 | 1 |
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
EXAMPLE_DOC_STRING = '''
Examples:
```py
>>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline
>>> from diffusers.utils import load_image
>>> import torch
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
... )
>>> pipe_prior.to("cuda")
>>> prompt = "A red cartoon frog, 4k"
>>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)
>>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(
... "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
... )
>>> pipe.to("cuda")
>>> init_image = load_image(
... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
... "/kandinsky/frog.png"
... )
>>> image = pipe(
... image=init_image,
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... height=768,
... width=768,
... num_inference_steps=100,
... strength=0.2,
... ).images
>>> image[0].save("red_frog.png")
```
'''
def downscale_height_and_width( height , width , scale_factor=8 ):
"""simple docstring"""
new_height = height // scale_factor**2
if height % scale_factor**2 != 0:
new_height += 1
new_width = width // scale_factor**2
if width % scale_factor**2 != 0:
new_width += 1
return new_height * scale_factor, new_width * scale_factor
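# Sanity check (added for illustration): with the default movq scale factor of 8,
# a 768x768 request maps to a 12x12 latent grid, reported back as 96x96.
assert downscale_height_and_width(768 , 768 , 8 ) == (96, 96)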
def prepare_image( pil_image , w=512 , h=512 ):
"""simple docstring"""
pil_image = pil_image.resize((w, h) , resample=Image.BICUBIC , reducing_gap=1 )
arr = np.array(pil_image.convert("RGB" ) )
arr = arr.astype(np.float32 ) / 127.5 - 1
arr = np.transpose(arr , [2, 0, 1] )
image = torch.from_numpy(arr ).unsqueeze(0 )
return image
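# Illustrative check (added): prepare_image resizes, rescales to [-1, 1], and returns
# a CHW tensor with a leading batch dimension.
_demo_image = prepare_image(Image.new("RGB" , (64, 64) ) , w=32 , h=32 )
assert tuple(_demo_image.shape ) == (1, 3, 32, 32)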
class KandinskyV22Img2ImgPipeline(DiffusionPipeline ):
"""simple docstring"""
def __init__( self , unet: UNet2DConditionModel , scheduler: DDPMScheduler , movq: VQModel , ):
super().__init__()
self.register_modules(
unet=unet , scheduler=scheduler , movq=movq , )
self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def get_timesteps( self , num_inference_steps , strength , device ):
# get the original timestep using init_timestep
init_timestep = min(int(num_inference_steps * strength ) , num_inference_steps )
t_start = max(num_inference_steps - init_timestep , 0 )
timesteps = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def prepare_latents( self , image , timestep , batch_size , num_images_per_prompt , dtype , device , generator=None ):
if not isinstance(image , (torch.Tensor, PIL.Image.Image, list) ):
raise ValueError(
f"""`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image )}""" )
image = image.to(device=device , dtype=dtype )
batch_size = batch_size * num_images_per_prompt
if image.shape[1] == 4:
init_latents = image
else:
if isinstance(generator , list ) and len(generator ) != batch_size:
raise ValueError(
f"""You have passed a list of generators of length {len(generator )}, but requested an effective batch"""
f""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" )
elif isinstance(generator , list ):
init_latents = [
self.movq.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(batch_size )
]
init_latents = torch.cat(init_latents , dim=0 )
else:
init_latents = self.movq.encode(image ).latent_dist.sample(generator )
init_latents = self.movq.config.scaling_factor * init_latents
init_latents = torch.cat([init_latents] , dim=0 )
shape = init_latents.shape
noise = randn_tensor(shape , generator=generator , device=device , dtype=dtype )
# get latents
init_latents = self.scheduler.add_noise(init_latents , noise , timestep )
latents = init_latents
return latents
def enable_sequential_cpu_offload( self , gpu_id=0 ):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("Please install accelerate via `pip install accelerate`" )
device = torch.device(f"""cuda:{gpu_id}""" )
models = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(cpu_offloaded_model , device )
def enable_model_cpu_offload( self , gpu_id=0 ):
if is_accelerate_available() and is_accelerate_version(">=" , "0.17.0.dev0" ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher." )
device = torch.device(f"""cuda:{gpu_id}""" )
if self.device.type != "cpu":
self.to("cpu" , silence_dtype_warnings=True )
torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)
hook = None
for cpu_offloaded_model in [self.unet, self.movq]:
_, hook = cpu_offload_with_hook(cpu_offloaded_model , device , prev_module_hook=hook )
# We'll offload the last model manually.
self.final_offload_hook = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def _execution_device( self ):
if not hasattr(self.unet , "_hf_hook" ):
return self.device
for module in self.unet.modules():
if (
hasattr(module , "_hf_hook" )
and hasattr(module._hf_hook , "execution_device" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(EXAMPLE_DOC_STRING )
def __call__( self , image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]] , image: Union[torch.FloatTensor, PIL.Image.Image, List[torch.FloatTensor], List[PIL.Image.Image]] , negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]] , height: int = 512 , width: int = 512 , num_inference_steps: int = 100 , guidance_scale: float = 4.0 , strength: float = 0.3 , num_images_per_prompt: int = 1 , generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None , output_type: Optional[str] = "pil" , return_dict: bool = True , ):
device = self._execution_device
do_classifier_free_guidance = guidance_scale > 1.0
if isinstance(image_embeds , list ):
image_embeds = torch.cat(image_embeds , dim=0 )
batch_size = image_embeds.shape[0]
if isinstance(negative_image_embeds , list ):
negative_image_embeds = torch.cat(negative_image_embeds , dim=0 )
if do_classifier_free_guidance:
image_embeds = image_embeds.repeat_interleave(num_images_per_prompt , dim=0 )
negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt , dim=0 )
image_embeds = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=device )
if not isinstance(image , list ):
image = [image]
if not all(isinstance(i , (PIL.Image.Image, torch.Tensor) ) for i in image ):
raise ValueError(
f"""Input is in incorrect format: {[type(i ) for i in image]}. Currently, we only support PIL image and pytorch tensor""" )
image = torch.cat([prepare_image(i , width , height ) for i in image] , dim=0 )
image = image.to(dtype=image_embeds.dtype , device=device )
latents = self.movq.encode(image )["latents"]
latents = latents.repeat_interleave(num_images_per_prompt , dim=0 )
self.scheduler.set_timesteps(num_inference_steps , device=device )
timesteps, num_inference_steps = self.get_timesteps(num_inference_steps , strength , device )
latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt )
height, width = downscale_height_and_width(height , width , self.movq_scale_factor )
latents = self.prepare_latents(
latents , latent_timestep , batch_size , num_images_per_prompt , image_embeds.dtype , device , generator )
for i, t in enumerate(self.progress_bar(timesteps ) ):
# expand the latents if we are doing classifier free guidance
latent_model_input = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
added_cond_kwargs = {"image_embeds": image_embeds}
noise_pred = self.unet(
sample=latent_model_input , timestep=t , encoder_hidden_states=None , added_cond_kwargs=added_cond_kwargs , return_dict=False , )[0]
if do_classifier_free_guidance:
noise_pred, variance_pred = noise_pred.split(latents.shape[1] , dim=1 )
noise_pred_uncond, noise_pred_text = noise_pred.chunk(2 )
_, variance_pred_text = variance_pred.chunk(2 )
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
noise_pred = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , "variance_type" )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
noise_pred, _ = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
latents = self.scheduler.step(
noise_pred , t , latents , generator=generator , )[0]
# post-processing
image = self.movq.decode(latents , force_not_quantize=True )["sample"]
if output_type not in ["pt", "np", "pil"]:
raise ValueError(f"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""" )
if output_type in ["np", "pil"]:
image = image * 0.5 + 0.5
image = image.clamp(0 , 1 )
image = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
image = self.numpy_to_pil(image )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=image )
| 364 |
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def tokenize( example ):
"""simple docstring"""
output = {}
output["input_ids"] = tokenizer(example["content"] , truncation=False )["input_ids"]
output["ratio_char_token"] = len(example["content"] ) / len(output["input_ids"] )
return output
parser = HfArgumentParser(PretokenizationArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)
t_start = time.time()
ds = load_dataset(args.dataset_name, split='''train''')
print(f'''Dataset loaded in {time.time()-t_start:.2f}s''')
t_start = time.time()
ds = ds.map(
tokenize,
num_proc=args.num_workers,
remove_columns=[
'''repo_name''',
'''path''',
'''copies''',
'''size''',
'''content''',
'''license''',
'''hash''',
'''line_mean''',
'''line_max''',
'''alpha_frac''',
'''autogenerated''',
],
)
print(f'''Dataset tokenized in {time.time()-t_start:.2f}s''')
t_start = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(f'''Data pushed to the hub in {time.time()-t_start:.2f}s''')
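# Illustration (added; numbers are hypothetical): ratio_char_token is characters per
# token, so a higher value means the tokenizer compresses the source text more.
content = "def add(a, b):\n    return a + b\n"
ratio_char_token = len(content) / 12  # ~2.7 chars per token for a 12-token encoding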
| 121 | 0 |
import numpy as np
from transformers import BatchFeature
from transformers.testing_utils import require_tf, require_torch
from .test_feature_extraction_common import FeatureExtractionSavingTestMixin
class SequenceFeatureExtractionTestMixin( FeatureExtractionSavingTestMixin ):
# to overwrite at feature extractor specific tests
feat_extract_tester = None
feature_extraction_class = None
@property
def __lowercase ( self : Dict ):
'''simple docstring'''
return self.feat_extract_tester.prepare_feat_extract_dict()
def __lowercase ( self : Dict ):
'''simple docstring'''
UpperCamelCase__ : List[str] = self.feature_extraction_class(**self.feat_extract_dict )
self.assertTrue(hasattr(_lowerCamelCase , "feature_size" ) )
self.assertTrue(hasattr(_lowerCamelCase , "sampling_rate" ) )
self.assertTrue(hasattr(_lowerCamelCase , "padding_value" ) )
def __lowercase ( self : Dict ):
'''simple docstring'''
UpperCamelCase__ : Optional[Any] = self.feat_extract_tester.prepare_inputs_for_common()
UpperCamelCase__ : Dict = self.feature_extraction_class(**self.feat_extract_dict )
UpperCamelCase__ : List[str] = feat_extract.model_input_names[0]
UpperCamelCase__ : Dict = BatchFeature({input_name: speech_inputs} )
self.assertTrue(all(len(_lowerCamelCase ) == len(_lowerCamelCase ) for x, y in zip(_lowerCamelCase , processed_features[input_name] ) ) )
UpperCamelCase__ : List[str] = self.feat_extract_tester.prepare_inputs_for_common(equal_length=_lowerCamelCase )
UpperCamelCase__ : int = BatchFeature({input_name: speech_inputs} , tensor_type="np" )
UpperCamelCase__ : List[Any] = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
UpperCamelCase__ : Any = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
@require_torch
def __lowercase ( self : Dict ):
'''simple docstring'''
UpperCamelCase__ : Optional[int] = self.feat_extract_tester.prepare_inputs_for_common(equal_length=_lowerCamelCase )
UpperCamelCase__ : Tuple = self.feature_extraction_class(**self.feat_extract_dict )
UpperCamelCase__ : Any = feat_extract.model_input_names[0]
UpperCamelCase__ : Optional[Any] = BatchFeature({input_name: speech_inputs} , tensor_type="pt" )
UpperCamelCase__ : Tuple = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
UpperCamelCase__ : Optional[Any] = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
@require_tf
def __lowercase ( self : List[Any] ):
'''simple docstring'''
UpperCamelCase__ : Any = self.feat_extract_tester.prepare_inputs_for_common(equal_length=_lowerCamelCase )
UpperCamelCase__ : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_dict )
UpperCamelCase__ : List[Any] = feat_extract.model_input_names[0]
UpperCamelCase__ : Optional[int] = BatchFeature({input_name: speech_inputs} , tensor_type="tf" )
UpperCamelCase__ : Optional[Any] = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
UpperCamelCase__ : Optional[int] = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
def __lowercase ( self : List[str] , SCREAMING_SNAKE_CASE : Union[str, Any]=False ):
'''simple docstring'''
def _inputs_have_equal_length(SCREAMING_SNAKE_CASE : Optional[Any] ):
UpperCamelCase__ : Union[str, Any] = len(input[0] )
for input_slice in input[1:]:
if len(_lowerCamelCase ) != length:
return False
return True
def _inputs_are_equal(SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : List[str] ):
if len(_lowerCamelCase ) != len(_lowerCamelCase ):
return False
for input_slice_a, input_slice_a in zip(_lowerCamelCase , _lowerCamelCase ):
if not np.allclose(np.asarray(_lowerCamelCase ) , np.asarray(_lowerCamelCase ) , atol=1e-3 ):
return False
return True
UpperCamelCase__ : Dict = self.feature_extraction_class(**self.feat_extract_dict )
UpperCamelCase__ : Union[str, Any] = self.feat_extract_tester.prepare_inputs_for_common(numpify=_lowerCamelCase )
UpperCamelCase__ : Tuple = feat_extract.model_input_names[0]
UpperCamelCase__ : int = BatchFeature({input_name: speech_inputs} )
UpperCamelCase__ : Optional[Any] = self.feat_extract_tester.seq_length_diff
UpperCamelCase__ : Union[str, Any] = self.feat_extract_tester.max_seq_length + pad_diff
UpperCamelCase__ : Any = self.feat_extract_tester.min_seq_length
UpperCamelCase__ : Optional[int] = self.feat_extract_tester.batch_size
UpperCamelCase__ : Tuple = self.feat_extract_tester.feature_size
# test padding for List[int] + numpy
UpperCamelCase__ : List[Any] = feat_extract.pad(_lowerCamelCase , padding=_lowerCamelCase )
UpperCamelCase__ : List[Any] = input_a[input_name]
UpperCamelCase__ : Optional[Any] = feat_extract.pad(_lowerCamelCase , padding="longest" )
UpperCamelCase__ : Optional[int] = input_a[input_name]
UpperCamelCase__ : List[str] = feat_extract.pad(_lowerCamelCase , padding="max_length" , max_length=len(speech_inputs[-1] ) )
UpperCamelCase__ : Optional[int] = input_a[input_name]
UpperCamelCase__ : Dict = feat_extract.pad(_lowerCamelCase , padding="longest" , return_tensors="np" )
UpperCamelCase__ : Optional[int] = input_a[input_name]
# max_length parameter has to be provided when setting `padding="max_length"`
with self.assertRaises(_lowerCamelCase ):
feat_extract.pad(_lowerCamelCase , padding="max_length" )[input_name]
UpperCamelCase__ : List[Any] = feat_extract.pad(
_lowerCamelCase , padding="max_length" , max_length=_lowerCamelCase , return_tensors="np" )
UpperCamelCase__ : Optional[Any] = input_a[input_name]
self.assertFalse(_inputs_have_equal_length(_lowerCamelCase ) )
self.assertTrue(_inputs_have_equal_length(_lowerCamelCase ) )
self.assertTrue(_inputs_have_equal_length(_lowerCamelCase ) )
self.assertTrue(_inputs_are_equal(_lowerCamelCase , _lowerCamelCase ) )
self.assertTrue(len(input_a[0] ) == pad_min_length )
self.assertTrue(len(input_a[1] ) == pad_min_length + pad_diff )
self.assertTrue(input_a.shape[:2] == (batch_size, len(input_a[0] )) )
self.assertTrue(input_a.shape[:2] == (batch_size, pad_max_length) )
if feature_size > 1:
self.assertTrue(input_a.shape[2] == input_a.shape[2] == feature_size )
# test padding for `pad_to_multiple_of` for List[int] + numpy
UpperCamelCase__ : int = feat_extract.pad(_lowerCamelCase , pad_to_multiple_of=10 )
UpperCamelCase__ : Union[str, Any] = input_a[input_name]
UpperCamelCase__ : List[Any] = feat_extract.pad(_lowerCamelCase , padding="longest" , pad_to_multiple_of=10 )
UpperCamelCase__ : Optional[int] = input_a[input_name]
UpperCamelCase__ : int = feat_extract.pad(
_lowerCamelCase , padding="max_length" , pad_to_multiple_of=10 , max_length=_lowerCamelCase )
UpperCamelCase__ : str = input_a[input_name]
UpperCamelCase__ : Union[str, Any] = feat_extract.pad(
_lowerCamelCase , padding="max_length" , pad_to_multiple_of=10 , max_length=_lowerCamelCase , return_tensors="np" , )
UpperCamelCase__ : Optional[Any] = input_a[input_name]
self.assertTrue(all(len(_lowerCamelCase ) % 10 == 0 for x in input_a ) )
self.assertTrue(_inputs_are_equal(_lowerCamelCase , _lowerCamelCase ) )
UpperCamelCase__ : Tuple = pad_max_length if pad_max_length % 10 == 0 else (pad_max_length // 10 + 1) * 10
self.assertTrue(all(len(_lowerCamelCase ) == expected_mult_pad_length for x in input_a ) )
self.assertEqual(input_a.shape[:2] , (batch_size, expected_mult_pad_length) )
if feature_size > 1:
self.assertTrue(input_a.shape[2] == feature_size )
# Check padding value is correct
UpperCamelCase__ : Optional[Any] = (np.ones(self.feat_extract_tester.feature_size ) * feat_extract.padding_value).sum()
self.assertTrue(
abs(np.asarray(input_a[0] )[pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) )
< 1e-3 )
self.assertTrue(
abs(
np.asarray(input_a[1] )[pad_min_length + pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - pad_diff) )
< 1e-3 )
self.assertTrue(
abs(
np.asarray(input_a[2] )[pad_min_length + 2 * pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - 2 * pad_diff) )
< 1e-3 )
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) ) < 1e-3 )
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (expected_mult_pad_length - pad_min_length) )
< 1e-3 )
def __lowercase ( self : Dict , SCREAMING_SNAKE_CASE : List[Any]=False ):
'''simple docstring'''
def _inputs_have_equal_length(SCREAMING_SNAKE_CASE : Union[str, Any] ):
UpperCamelCase__ : Any = len(input[0] )
for input_slice in input[1:]:
if len(_lowerCamelCase ) != length:
return False
return True
def _inputs_are_equal(SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : Optional[Any] ):
if len(_lowerCamelCase ) != len(_lowerCamelCase ):
return False
for input_slice_a, input_slice_a in zip(_lowerCamelCase , _lowerCamelCase ):
if not np.allclose(np.asarray(_lowerCamelCase ) , np.asarray(_lowerCamelCase ) , atol=1e-3 ):
return False
return True
UpperCamelCase__ : int = self.feature_extraction_class(**self.feat_extract_dict )
UpperCamelCase__ : Dict = self.feat_extract_tester.prepare_inputs_for_common(numpify=_lowerCamelCase )
UpperCamelCase__ : Optional[int] = feat_extract.model_input_names[0]
UpperCamelCase__ : List[str] = BatchFeature({input_name: speech_inputs} )
# truncate to smallest
UpperCamelCase__ : str = feat_extract.pad(
_lowerCamelCase , padding="max_length" , max_length=len(speech_inputs[0] ) , truncation=_lowerCamelCase )
UpperCamelCase__ : Optional[Any] = input_a[input_name]
UpperCamelCase__ : Union[str, Any] = feat_extract.pad(_lowerCamelCase , padding="max_length" , max_length=len(speech_inputs[0] ) )
UpperCamelCase__ : Dict = input_a[input_name]
self.assertTrue(_inputs_have_equal_length(_lowerCamelCase ) )
self.assertFalse(_inputs_have_equal_length(_lowerCamelCase ) )
# truncate to smallest with np
UpperCamelCase__ : int = feat_extract.pad(
_lowerCamelCase , padding="max_length" , max_length=len(speech_inputs[0] ) , return_tensors="np" , truncation=_lowerCamelCase , )
UpperCamelCase__ : Optional[Any] = input_a[input_name]
UpperCamelCase__ : Tuple = feat_extract.pad(
_lowerCamelCase , padding="max_length" , max_length=len(speech_inputs[0] ) , return_tensors="np" )
UpperCamelCase__ : Optional[int] = input_a[input_name]
self.assertTrue(_inputs_have_equal_length(_lowerCamelCase ) )
self.assertTrue(input_a.shape[1] == len(speech_inputs[0] ) )
# since truncation forces padding to be smaller than longest input
# function can't return `np.ndarray`, but has to return list
self.assertFalse(_inputs_have_equal_length(_lowerCamelCase ) )
# truncate to middle
UpperCamelCase__ : List[Any] = feat_extract.pad(
_lowerCamelCase , padding="max_length" , max_length=len(speech_inputs[1] ) , truncation=_lowerCamelCase , return_tensors="np" , )
UpperCamelCase__ : Any = input_a[input_name]
UpperCamelCase__ : Tuple = feat_extract.pad(
_lowerCamelCase , padding="max_length" , max_length=len(speech_inputs[1] ) , truncation=_lowerCamelCase )
UpperCamelCase__ : List[Any] = input_a[input_name]
UpperCamelCase__ : int = feat_extract.pad(
_lowerCamelCase , padding="max_length" , max_length=len(speech_inputs[1] ) , return_tensors="np" )
UpperCamelCase__ : int = input_a[input_name]
self.assertTrue(input_a.shape[1] == len(speech_inputs[1] ) )
self.assertTrue(_inputs_have_equal_length(_lowerCamelCase ) )
self.assertTrue(_inputs_have_equal_length(_lowerCamelCase ) )
self.assertTrue(_inputs_are_equal(_lowerCamelCase , _lowerCamelCase ) )
# since truncation forces padding to be smaller than longest input
# function can't return `np.ndarray`, but has to return list
self.assertFalse(_inputs_have_equal_length(_lowerCamelCase ) )
self.assertTrue(len(input_a[-1] ) == len(speech_inputs[-1] ) )
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(_lowerCamelCase ):
feat_extract.pad(_lowerCamelCase , truncation=_lowerCamelCase )[input_name]
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(_lowerCamelCase ):
feat_extract.pad(_lowerCamelCase , padding="longest" , truncation=_lowerCamelCase )[input_name]
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(_lowerCamelCase ):
feat_extract.pad(_lowerCamelCase , padding="longest" , truncation=_lowerCamelCase )[input_name]
# max_length parameter has to be provided when setting `truncation=True` and padding="max_length"
with self.assertRaises(_lowerCamelCase ):
feat_extract.pad(_lowerCamelCase , padding="max_length" , truncation=_lowerCamelCase )[input_name]
# test truncation for `pad_to_multiple_of` for List[int] + numpy
UpperCamelCase__ : Union[str, Any] = 12
UpperCamelCase__ : int = feat_extract.pad(
_lowerCamelCase , padding="max_length" , max_length=len(speech_inputs[0] ) , pad_to_multiple_of=_lowerCamelCase , truncation=_lowerCamelCase , )
UpperCamelCase__ : List[Any] = input_a[input_name]
UpperCamelCase__ : Optional[Any] = feat_extract.pad(
_lowerCamelCase , padding="max_length" , max_length=len(speech_inputs[0] ) , pad_to_multiple_of=_lowerCamelCase , )
UpperCamelCase__ : Any = input_a[input_name]
# retrieve expected_length as multiple of pad_to_multiple_of
UpperCamelCase__ : int = len(speech_inputs[0] )
if expected_length % pad_to_multiple_of != 0:
UpperCamelCase__ : Optional[int] = ((len(speech_inputs[0] ) // pad_to_multiple_of) + 1) * pad_to_multiple_of
self.assertTrue(len(input_a[0] ) == expected_length )
self.assertTrue(_inputs_have_equal_length(_lowerCamelCase ) )
self.assertFalse(_inputs_have_equal_length(_lowerCamelCase ) )
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
self._check_padding(numpify=_lowerCamelCase )
def __lowercase ( self : str ):
'''simple docstring'''
self._check_padding(numpify=_lowerCamelCase )
def __lowercase ( self : List[str] ):
'''simple docstring'''
self._check_truncation(numpify=_lowerCamelCase )
def __lowercase ( self : str ):
'''simple docstring'''
self._check_truncation(numpify=_lowerCamelCase )
@require_torch
def __lowercase ( self : str ):
'''simple docstring'''
UpperCamelCase__ : Optional[Any] = self.feature_extraction_class(**self.feat_extract_dict )
UpperCamelCase__ : List[Any] = self.feat_extract_tester.prepare_inputs_for_common()
UpperCamelCase__ : Tuple = feat_extract.model_input_names[0]
UpperCamelCase__ : Optional[Any] = BatchFeature({input_name: speech_inputs} )
UpperCamelCase__ : Union[str, Any] = feat_extract.pad(_lowerCamelCase , padding="longest" , return_tensors="np" )[input_name]
UpperCamelCase__ : Any = feat_extract.pad(_lowerCamelCase , padding="longest" , return_tensors="pt" )[input_name]
self.assertTrue(abs(input_np.astype(np.float32 ).sum() - input_pt.numpy().astype(np.float32 ).sum() ) < 1e-2 )
@require_tf
def __lowercase ( self : str ):
'''simple docstring'''
UpperCamelCase__ : List[str] = self.feature_extraction_class(**self.feat_extract_dict )
UpperCamelCase__ : int = self.feat_extract_tester.prepare_inputs_for_common()
UpperCamelCase__ : Dict = feat_extract.model_input_names[0]
UpperCamelCase__ : str = BatchFeature({input_name: speech_inputs} )
UpperCamelCase__ : List[Any] = feat_extract.pad(_lowerCamelCase , padding="longest" , return_tensors="np" )[input_name]
UpperCamelCase__ : Optional[int] = feat_extract.pad(_lowerCamelCase , padding="longest" , return_tensors="tf" )[input_name]
self.assertTrue(abs(input_np.astype(np.float32 ).sum() - input_tf.numpy().astype(np.float32 ).sum() ) < 1e-2 )
def __lowercase ( self : str ):
'''simple docstring'''
UpperCamelCase__ : List[str] = self.feat_extract_dict
UpperCamelCase__ : List[str] = True
UpperCamelCase__ : Union[str, Any] = self.feature_extraction_class(**_lowerCamelCase )
UpperCamelCase__ : Tuple = self.feat_extract_tester.prepare_inputs_for_common()
UpperCamelCase__ : List[str] = [len(_lowerCamelCase ) for x in speech_inputs]
UpperCamelCase__ : Any = feat_extract.model_input_names[0]
UpperCamelCase__ : Tuple = BatchFeature({input_name: speech_inputs} )
UpperCamelCase__ : str = feat_extract.pad(_lowerCamelCase , padding="longest" , return_tensors="np" )
self.assertIn("attention_mask" , _lowerCamelCase )
self.assertListEqual(list(processed.attention_mask.shape ) , list(processed[input_name].shape[:2] ) )
self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() , _lowerCamelCase )
def __lowercase ( self : int ):
'''simple docstring'''
UpperCamelCase__ : str = self.feat_extract_dict
UpperCamelCase__ : List[str] = True
UpperCamelCase__ : List[str] = self.feature_extraction_class(**_lowerCamelCase )
UpperCamelCase__ : int = self.feat_extract_tester.prepare_inputs_for_common()
UpperCamelCase__ : Any = [len(_lowerCamelCase ) for x in speech_inputs]
UpperCamelCase__ : List[Any] = feat_extract.model_input_names[0]
UpperCamelCase__ : Dict = BatchFeature({input_name: speech_inputs} )
UpperCamelCase__ : List[Any] = min(_lowerCamelCase )
UpperCamelCase__ : Union[str, Any] = feat_extract.pad(
_lowerCamelCase , padding="max_length" , max_length=_lowerCamelCase , truncation=_lowerCamelCase , return_tensors="np" )
self.assertIn("attention_mask" , _lowerCamelCase )
self.assertListEqual(
list(processed_pad.attention_mask.shape ) , [processed_pad[input_name].shape[0], max_length] )
self.assertListEqual(
processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() , [max_length for x in speech_inputs] )
| 189 |
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def embeddings( idx ):
'''simple docstring'''
embed = []
embed.append(
(
f'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight',
f'stage{idx}.patch_embed.proj.weight',
) )
embed.append(
(
f'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias',
f'stage{idx}.patch_embed.proj.bias',
) )
embed.append(
(
f'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight',
f'stage{idx}.patch_embed.norm.weight',
) )
embed.append(
(
f'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias',
f'stage{idx}.patch_embed.norm.bias',
) )
return embed
def attention( idx , cnt ):
'''simple docstring'''
attention_weights = []
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight',
f'stage{idx}.blocks.{cnt}.attn.proj_q.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias',
f'stage{idx}.blocks.{cnt}.attn.proj_q.bias',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight',
f'stage{idx}.blocks.{cnt}.attn.proj_k.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias',
f'stage{idx}.blocks.{cnt}.attn.proj_k.bias',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight',
f'stage{idx}.blocks.{cnt}.attn.proj_v.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias',
f'stage{idx}.blocks.{cnt}.attn.proj_v.bias',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight',
f'stage{idx}.blocks.{cnt}.attn.proj.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias',
f'stage{idx}.blocks.{cnt}.attn.proj.bias',
) )
attention_weights.append(
(f'cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight', f'stage{idx}.blocks.{cnt}.mlp.fc1.weight') )
attention_weights.append(
(f'cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias', f'stage{idx}.blocks.{cnt}.mlp.fc1.bias') )
attention_weights.append(
(f'cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight', f'stage{idx}.blocks.{cnt}.mlp.fc2.weight') )
attention_weights.append(
(f'cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias', f'stage{idx}.blocks.{cnt}.mlp.fc2.bias') )
attention_weights.append(
(f'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight', f'stage{idx}.blocks.{cnt}.norm1.weight') )
attention_weights.append(
(f'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias', f'stage{idx}.blocks.{cnt}.norm1.bias') )
attention_weights.append(
(f'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight', f'stage{idx}.blocks.{cnt}.norm2.weight') )
attention_weights.append(
(f'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias', f'stage{idx}.blocks.{cnt}.norm2.bias') )
return attention_weights
def cls_token( idx ):
'''simple docstring'''
token = []
token.append((f'cvt.encoder.stages.{idx}.cls_token', 'stage2.cls_token') )
return token
def final():
'''simple docstring'''
head = []
head.append(('layernorm.weight', 'norm.weight') )
head.append(('layernorm.bias', 'norm.bias') )
head.append(('classifier.weight', 'head.weight') )
head.append(('classifier.bias', 'head.bias') )
return head
def convert_cvt_checkpoint( cvt_model , image_size , cvt_file_name , pytorch_dump_folder_path ):
'''simple docstring'''
img_labels_file = 'imagenet-1k-id2label.json'
num_labels = 10_00
repo_id = 'huggingface/label-files'
id2label = json.load(open(cached_download(hf_hub_url(repo_id , img_labels_file , repo_type='dataset' ) ) , 'r' ) )
id2label = {int(k ): v for k, v in id2label.items()}
label2id = {v: k for k, v in id2label.items()}
config = CvtConfig(num_labels=num_labels , id2label=id2label , label2id=label2id )
# For depth size 13 (13 = 1+2+10)
if cvt_model.rsplit('/' , 1 )[-1][4:6] == "13":
config.depth = [1, 2, 10]
# For depth size 21 (21 = 1+4+16)
elif cvt_model.rsplit('/' , 1 )[-1][4:6] == "21":
config.depth = [1, 4, 16]
# For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 20)
else:
config.depth = [2, 2, 20]
config.num_heads = [3, 12, 16]
config.embed_dim = [1_92, 7_68, 10_24]
model = CvtForImageClassification(config )
image_processor = AutoImageProcessor.from_pretrained('facebook/convnext-base-224-22k-1k' )
image_processor.size['shortest_edge'] = image_size
original_weights = torch.load(cvt_file_name , map_location=torch.device('cpu' ) )
huggingface_weights = OrderedDict()
list_of_state_dict = []
for idx in range(len(config.depth ) ):
if config.cls_token[idx]:
list_of_state_dict = list_of_state_dict + cls_token(idx )
list_of_state_dict = list_of_state_dict + embeddings(idx )
for cnt in range(config.depth[idx] ):
list_of_state_dict = list_of_state_dict + attention(idx , cnt )
list_of_state_dict = list_of_state_dict + final()
for gg in list_of_state_dict:
print(gg )
for i in range(len(list_of_state_dict ) ):
huggingface_weights[list_of_state_dict[i][0]] = original_weights[list_of_state_dict[i][1]]
model.load_state_dict(huggingface_weights )
model.save_pretrained(pytorch_dump_folder_path )
image_processor.save_pretrained(pytorch_dump_folder_path )
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
'--cvt_model',
default='cvt-w24',
type=str,
help='Name of the cvt model you\'d like to convert.',
)
parser.add_argument(
'--image_size',
default=3_8_4,
type=int,
help='Input Image Size',
)
parser.add_argument(
'--cvt_file_name',
default=R'cvtmodels\CvT-w24-384x384-IN-22k.pth',
type=str,
help='Input Image Size',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
args = parser.parse_args()
convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
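# Example invocation (added for illustration; the script name and paths are placeholders):
# python convert_cvt_checkpoint.py \
#     --cvt_model cvt-13 \
#     --image_size 384 \
#     --cvt_file_name cvtmodels/CvT-13-384x384-IN-1k.pth \
#     --pytorch_dump_folder_path ./cvt-13-converted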
| 220 | 0 |
import os
from bleurt import score # From: git+https://github.com/google-research/bleurt.git
import datasets
logger = datasets.logging.get_logger(__name__)
_CITATION = '''\
@inproceedings{bleurt,
title={BLEURT: Learning Robust Metrics for Text Generation},
author={Thibault Sellam and Dipanjan Das and Ankur P. Parikh},
booktitle={ACL},
year={2020},
url={https://arxiv.org/abs/2004.04696}
}
'''
_DESCRIPTION = '''\
BLEURT is a learnt evaluation metric for Natural Language Generation. It is built using multiple phases of transfer learning starting from a pretrained BERT model (Devlin et al. 2018)
and then employing another pre-training phase using synthetic data. Finally it is trained on WMT human annotations. You may run BLEURT out-of-the-box or fine-tune
it for your specific application (the latter is expected to perform better).
See the project\'s README at https://github.com/google-research/bleurt#readme for more information.
'''
_KWARGS_DESCRIPTION = '''
BLEURT score.
Args:
`predictions` (list of str): prediction/candidate sentences
`references` (list of str): reference sentences
`checkpoint` BLEURT checkpoint. Will default to BLEURT-tiny if None.
Returns:
\'scores\': List of scores.
Examples:
>>> predictions = ["hello there", "general kenobi"]
>>> references = ["hello there", "general kenobi"]
>>> bleurt = datasets.load_metric("bleurt")
>>> results = bleurt.compute(predictions=predictions, references=references)
>>> print([round(v, 2) for v in results["scores"]])
[1.03, 1.04]
'''
CHECKPOINT_URLS = {
'''bleurt-tiny-128''': '''https://storage.googleapis.com/bleurt-oss/bleurt-tiny-128.zip''',
'''bleurt-tiny-512''': '''https://storage.googleapis.com/bleurt-oss/bleurt-tiny-512.zip''',
'''bleurt-base-128''': '''https://storage.googleapis.com/bleurt-oss/bleurt-base-128.zip''',
'''bleurt-base-512''': '''https://storage.googleapis.com/bleurt-oss/bleurt-base-512.zip''',
'''bleurt-large-128''': '''https://storage.googleapis.com/bleurt-oss/bleurt-large-128.zip''',
'''bleurt-large-512''': '''https://storage.googleapis.com/bleurt-oss/bleurt-large-512.zip''',
'''BLEURT-20-D3''': '''https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D3.zip''',
'''BLEURT-20-D6''': '''https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D6.zip''',
'''BLEURT-20-D12''': '''https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D12.zip''',
'''BLEURT-20''': '''https://storage.googleapis.com/bleurt-oss-21/BLEURT-20.zip''',
}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class BLEURT( datasets.Metric ):
def _info(self ) -> datasets.MetricInfo:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='''https://github.com/google-research/bleurt''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Value('''string''' , id='''sequence''' ),
} ) , codebase_urls=['''https://github.com/google-research/bleurt'''] , reference_urls=['''https://github.com/google-research/bleurt''', '''https://arxiv.org/abs/2004.04696'''] , )
    def _download_and_prepare(self, dl_manager):
'''simple docstring'''
if self.config_name == "default":
logger.warning(
'''Using default BLEURT-Base checkpoint for sequence maximum length 128. '''
'''You can use a bigger model for better results with e.g.: datasets.load_metric(\'bleurt\', \'bleurt-large-512\').''' )
            checkpoint_name = '''bleurt-base-128'''
        elif self.config_name.lower() in CHECKPOINT_URLS:
            checkpoint_name = self.config_name.lower()
        elif self.config_name.upper() in CHECKPOINT_URLS:
            checkpoint_name = self.config_name.upper()
else:
raise KeyError(
F'''{self.config_name} model not found. You should supply the name of a model checkpoint for bleurt in {CHECKPOINT_URLS.keys()}''' )
# download the model checkpoint specified by self.config_name and set up the scorer
        model_path = dl_manager.download_and_extract(CHECKPOINT_URLS[checkpoint_name])
        self.scorer = score.BleurtScorer(os.path.join(model_path, checkpoint_name))
    def _compute(self, predictions, references):
        scores = self.scorer.score(references=references, candidates=predictions)
        return {"scores": scores}
| 366 |
import flax.linen as nn
import jax.numpy as jnp
from .attention_flax import FlaxTransformer2DModel
from .resnet_flax import FlaxDownsample2D, FlaxResnetBlock2D, FlaxUpsample2D
class FlaxCrossAttnDownBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_downsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
        attentions = []
        for i in range(self.num_layers):
            # The first resnet maps in_channels -> out_channels; later ones keep out_channels.
            in_channels = self.in_channels if i == 0 else self.out_channels
            res_block = FlaxResnetBlock2D(
                in_channels=in_channels, out_channels=self.out_channels, dropout_prob=self.dropout, dtype=self.dtype, )
            resnets.append(res_block)
            attn_block = FlaxTransformer2DModel(
                in_channels=self.out_channels, n_heads=self.num_attention_heads, d_head=self.out_channels // self.num_attention_heads, depth=1, use_linear_projection=self.use_linear_projection, only_cross_attention=self.only_cross_attention, use_memory_efficient_attention=self.use_memory_efficient_attention, dtype=self.dtype, )
            attentions.append(attn_block)
        self.resnets = resnets
        self.attentions = attentions
        if self.add_downsample:
            self.downsamplers_0 = FlaxDownsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, temb, encoder_hidden_states, deterministic=True):
        output_states = ()
        for resnet, attn in zip(self.resnets, self.attentions):
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)
            output_states += (hidden_states,)
        if self.add_downsample:
            hidden_states = self.downsamplers_0(hidden_states)
            output_states += (hidden_states,)
        return hidden_states, output_states
class FlaxDownBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    add_downsample: bool = True
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
        for i in range(self.num_layers):
            in_channels = self.in_channels if i == 0 else self.out_channels
            res_block = FlaxResnetBlock2D(
                in_channels=in_channels, out_channels=self.out_channels, dropout_prob=self.dropout, dtype=self.dtype, )
            resnets.append(res_block)
        self.resnets = resnets
        if self.add_downsample:
            self.downsamplers_0 = FlaxDownsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, temb, deterministic=True):
        output_states = ()
        for resnet in self.resnets:
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            output_states += (hidden_states,)
        if self.add_downsample:
            hidden_states = self.downsamplers_0(hidden_states)
            output_states += (hidden_states,)
        return hidden_states, output_states
class FlaxCrossAttnUpBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_upsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
        attentions = []
        for i in range(self.num_layers):
            # Each resnet also consumes the matching skip connection from the down pass.
            res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels
            res_block = FlaxResnetBlock2D(
                in_channels=resnet_in_channels + res_skip_channels, out_channels=self.out_channels, dropout_prob=self.dropout, dtype=self.dtype, )
            resnets.append(res_block)
            attn_block = FlaxTransformer2DModel(
                in_channels=self.out_channels, n_heads=self.num_attention_heads, d_head=self.out_channels // self.num_attention_heads, depth=1, use_linear_projection=self.use_linear_projection, only_cross_attention=self.only_cross_attention, use_memory_efficient_attention=self.use_memory_efficient_attention, dtype=self.dtype, )
            attentions.append(attn_block)
        self.resnets = resnets
        self.attentions = attentions
        if self.add_upsample:
            self.upsamplers_0 = FlaxUpsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, res_hidden_states_tuple, temb, encoder_hidden_states, deterministic=True):
        for resnet, attn in zip(self.resnets, self.attentions):
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = jnp.concatenate((hidden_states, res_hidden_states), axis=-1)
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)
        if self.add_upsample:
            hidden_states = self.upsamplers_0(hidden_states)
        return hidden_states
class FlaxUpBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    add_upsample: bool = True
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
        for i in range(self.num_layers):
            res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels
            res_block = FlaxResnetBlock2D(
                in_channels=resnet_in_channels + res_skip_channels, out_channels=self.out_channels, dropout_prob=self.dropout, dtype=self.dtype, )
            resnets.append(res_block)
        self.resnets = resnets
        if self.add_upsample:
            self.upsamplers_0 = FlaxUpsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, res_hidden_states_tuple, temb, deterministic=True):
        for resnet in self.resnets:
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = jnp.concatenate((hidden_states, res_hidden_states), axis=-1)
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
        if self.add_upsample:
            hidden_states = self.upsamplers_0(hidden_states)
        return hidden_states
class FlaxUNetMidBlock2DCrossAttn(nn.Module):
    in_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    use_linear_projection: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        # There is always one more resnet than attention block.
        resnets = [
            FlaxResnetBlock2D(
                in_channels=self.in_channels, out_channels=self.in_channels, dropout_prob=self.dropout, dtype=self.dtype, )
        ]
        attentions = []
        for _ in range(self.num_layers):
            attn_block = FlaxTransformer2DModel(
                in_channels=self.in_channels, n_heads=self.num_attention_heads, d_head=self.in_channels // self.num_attention_heads, depth=1, use_linear_projection=self.use_linear_projection, use_memory_efficient_attention=self.use_memory_efficient_attention, dtype=self.dtype, )
            attentions.append(attn_block)
            res_block = FlaxResnetBlock2D(
                in_channels=self.in_channels, out_channels=self.in_channels, dropout_prob=self.dropout, dtype=self.dtype, )
            resnets.append(res_block)
        self.resnets = resnets
        self.attentions = attentions

    def __call__(self, hidden_states, temb, encoder_hidden_states, deterministic=True):
        hidden_states = self.resnets[0](hidden_states, temb)
        for attn, resnet in zip(self.attentions, self.resnets[1:]):
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
        return hidden_states
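# A minimal smoke-test sketch for the blocks above (an illustrative sketch, not part of
# the library: it assumes jax/flax are installed and that this module's relative imports
# resolve; every shape and channel count below is an arbitrary assumption).
if __name__ == "__main__":
    import jax

    block = FlaxCrossAttnDownBlock2D(in_channels=32, out_channels=64, num_attention_heads=8)
    sample = jnp.zeros((1, 16, 16, 32))              # Flax blocks use channels-last layout
    temb = jnp.zeros((1, 128))                       # time-embedding width is arbitrary here
    encoder_hidden_states = jnp.zeros((1, 77, 768))  # e.g. CLIP-sized text states (assumed)
    params = block.init(jax.random.PRNGKey(0), sample, temb, encoder_hidden_states)
    hidden_states, skip_states = block.apply(params, sample, temb, encoder_hidden_states)
    print(hidden_states.shape, len(skip_states))     # (1, 8, 8, 64) and num_layers + 1 skips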
| 279 | 0 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetrImageProcessor
class DetrImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_rescale=True,
        rescale_factor=1 / 255,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_pad=True,
    ):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {'shortest_edge': 18, 'longest_edge': 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_pad = do_pad
    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_pad": self.do_pad,
}
    def get_expected_values(self, image_inputs, batched=False):
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size['shortest_edge'] * h / w)
                expected_width = self.size['shortest_edge']
            elif w > h:
                expected_height = self.size['shortest_edge']
                expected_width = int(self.size['shortest_edge'] * w / h)
            else:
                expected_height = self.size['shortest_edge']
                expected_width = self.size['shortest_edge']
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]
        return expected_height, expected_width
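    # Worked example of the resizing rule above (illustrative): a 640x480 PIL image with
    # size={"shortest_edge": 18} gives w > h, so expected_height = 18 and
    # expected_width = int(18 * 640 / 480) = 24.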
@require_torch
@require_vision
class DetrImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DetrImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DetrImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, 'image_mean'))
        self.assertTrue(hasattr(image_processing, 'image_std'))
        self.assertTrue(hasattr(image_processing, 'do_normalize'))
        self.assertTrue(hasattr(image_processing, 'do_rescale'))
        self.assertTrue(hasattr(image_processing, 'rescale_factor'))
        self.assertTrue(hasattr(image_processing, 'do_resize'))
        self.assertTrue(hasattr(image_processing, 'size'))
        self.assertTrue(hasattr(image_processing, 'do_pad'))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {'shortest_edge': 18, 'longest_edge': 1333})
        self.assertEqual(image_processor.do_pad, True)
        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False)
        self.assertEqual(image_processor.size, {'shortest_edge': 42, 'longest_edge': 84})
        self.assertEqual(image_processor.do_pad, False)
    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )
        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ), )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ), )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ), )
@slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
        with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt', 'r') as f:
            target = json.loads(f.read())
        target = {'image_id': 39769, 'annotations': target}
        # encode them
        image_processing = DetrImageProcessor.from_pretrained('facebook/detr-resnet-50')
        encoding = image_processing(images=image, annotations=target, return_tensors='pt')
        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding['pixel_values'].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3], expected_slice, atol=1e-4))
        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding['labels'][0]['area'], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding['labels'][0]['boxes'].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding['labels'][0]['size'], expected_size))
@slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
        with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt', 'r') as f:
            target = json.loads(f.read())
        target = {'file_name': '000000039769.png', 'image_id': 39769, 'segments_info': target}
        masks_path = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic')
        # encode them
        image_processing = DetrImageProcessor.from_pretrained('facebook/detr-resnet-50-panoptic')
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors='pt')
        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding['pixel_values'].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3], expected_slice, atol=1e-4))
        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding['labels'][0]['area'], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding['labels'][0]['boxes'].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding['labels'][0]['masks'].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding['labels'][0]['size'], expected_size))
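# A minimal usage sketch for the processor under test (not part of the test file;
# it downloads pretrained weights, and the image path is a placeholder):
#
#   processor = DetrImageProcessor.from_pretrained("facebook/detr-resnet-50")
#   image = Image.open("cats.png")
#   encoding = processor(images=image, return_tensors="pt")
#   encoding["pixel_values"].shape  # (1, 3, H, W) after shortest-edge resize and padding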
| 176 |
import argparse
import fairseq
import torch
from torch import nn
from transformers import (
    MBart50Tokenizer,
    MBartConfig,
    MBartForCausalLM,
    SpeechEncoderDecoderConfig,
    SpeechEncoderDecoderModel,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Model,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""mask_emb""": """masked_spec_embed""",
}
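# How MAPPING is applied below (an illustrative example): a fairseq key such as
# "encoder.layers.3.self_attn.k_proj.weight" matches the "self_attn.k_proj" entry,
# the layer index "3" replaces the "*" wildcard, and the tensor is copied to
# "encoder.layers.3.attention.k_proj.weight" on the Hugging Face side.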
TOP_LEVEL_KEYS = [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split('.'):
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        F'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
        F' {value.shape} for {full_name}'
    )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(F'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.')
def recursively_load_weights_wav2vec2(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor
    adapter = hf_model.adapter
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == 'group', )
            is_used = True
        elif any(x in name for x in ['adaptor', 'w2v_encoder.proj.', 'w2v_proj_ln.']):
            load_adapter(name, value, adapter, unused_weights)
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split('w2v_model.')[-1] == name.split('.')[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split('.')[-2]
                        mapped_key = mapped_key.replace('*', layer_index)
                    if "weight_g" in name:
                        weight_type = 'weight_g'
                    elif "weight_v" in name:
                        weight_type = 'weight_v'
                    elif "bias" in name:
                        weight_type = 'bias'
                    elif "weight" in name:
                        weight_type = 'weight'
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                    continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(F'Unused weights: {unused_weights}')
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split('conv_layers.')[-1]
    items = name.split('.')
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                F'{full_name} has size {value.shape}, but'
                F' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.')
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                F'{full_name} has size {value.shape}, but'
                F' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.')
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                F'{full_name} has size {value.shape}, but'
                F' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.'
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(F'Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.')
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                F'{full_name} has size {value.shape}, but'
                F' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.'
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(F'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.')
    else:
        unused_weights.append(full_name)
def load_adapter(full_name, value, adapter, unused_weights):
    name = full_name.split('adaptor.')[-1]
    items = name.split('.')
    if items[1].isdigit():
        layer_id = int(items[1])
    else:
        layer_id = None
    if "adaptor" not in full_name:
        if "proj_ln" in full_name:
            # has to be layer norm
            if "bias" in name:
                assert (
                    value.shape == adapter.proj_layer_norm.bias.data.shape
                ), F'{full_name} has size {value.shape}, but {adapter.proj_layer_norm.bias.data.shape} was found.'
                adapter.proj_layer_norm.bias.data = value
                logger.info(F'Adapter proj layer norm bias was initialized from {full_name}.')
            if "weight" in name:
                assert (
                    value.shape == adapter.proj_layer_norm.weight.data.shape
                ), F'{full_name} has size {value.shape}, but {adapter.proj_layer_norm.weight.data.shape} was found.'
                adapter.proj_layer_norm.weight.data = value
        else:
            # has to be projection layer
            if "bias" in name:
                assert (
                    value.shape == adapter.proj.bias.data.shape
                ), F'{full_name} has size {value.shape}, but {adapter.proj.bias.data.shape} was found.'
                adapter.proj.bias.data = value
                logger.info(F'Adapter proj layer bias was initialized from {full_name}.')
            if "weight" in name:
                assert (
                    value.shape == adapter.proj.weight.data.shape
                ), F'{full_name} has size {value.shape}, but {adapter.proj.weight.data.shape} was found.'
                adapter.proj.weight.data = value
                logger.info(F'Adapter proj layer weight was initialized from {full_name}.')
    elif isinstance(layer_id, int):
        if "bias" in name:
            assert (
                value.shape == adapter.layers[layer_id].conv.bias.data.shape
            ), F'{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.bias.data.shape} was found.'
            adapter.layers[layer_id].conv.bias.data = value
            logger.info(F'Adapter layer {layer_id} bias was initialized from {full_name}.')
        elif "weight" in name:
            assert (
                value.shape == adapter.layers[layer_id].conv.weight.data.shape
            ), F'{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.weight.data.shape} was found.'
            adapter.layers[layer_id].conv.weight.data = value
            logger.info(F'Adapter layer {layer_id} weight was initialized from {full_name}.')
    else:
        unused_weights.append(full_name)
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
@torch.no_grad()
def convert_wav2vec2_checkpoint(
    checkpoint_path,
    pytorch_dump_folder_path,
    dict_path,
    config_yaml_path,
    encoder_config_path,
    decoder_config_path,
    add_adapter,
    adapter_kernel_size,
    adapter_stride,
    decoder_start_token_id,
    encoder_output_dim,
):
    # load configs
    encoder_config = Wav2Vec2Config.from_pretrained(
        encoder_config_path, add_adapter=add_adapter, adapter_stride=adapter_stride, adapter_kernel_size=adapter_kernel_size, use_auth_token=True, output_hidden_size=encoder_output_dim, )
    decoder_config = MBartConfig.from_pretrained(decoder_config_path)
    # load model
    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={
            'config_yaml': config_yaml_path,
            'data': '/'.join(dict_path.split('/')[:-1]),
            'w2v_path': checkpoint_path,
            'load_pretrained_decoder_from': None,
        }, )
    model = model[0].eval()
    # load feature extractor
    feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(encoder_config_path, use_auth_token=True)
    # set weights for wav2vec2 encoder
    hf_encoder = Wav2Vec2Model(encoder_config)
    recursively_load_weights_wav2vec2(model.encoder, hf_encoder)
    # load decoder weights
    hf_decoder = MBartForCausalLM(decoder_config)
    missing_keys, unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict(), strict=False)
    logger.warning(F'The following keys are missing when loading the decoder weights: {missing_keys}')
    logger.warning(F'The following keys are unexpected when loading the decoder weights: {unexpected_keys}')
    hf_wav2vec = SpeechEncoderDecoderModel(encoder=hf_encoder, decoder=hf_decoder)
    hf_wav2vec.config.tie_word_embeddings = False
    tokenizer = MBart50Tokenizer(dict_path)
    tokenizer.save_pretrained(pytorch_dump_folder_path)
    config = hf_wav2vec.config.to_dict()
    config['pad_token_id'] = tokenizer.pad_token_id
    config['bos_token_id'] = tokenizer.bos_token_id
    config['eos_token_id'] = tokenizer.eos_token_id
    config['tokenizer_class'] = 'mbart50'
    config['feature_extractor_type'] = 'wav2vec2'
    config['decoder_start_token_id'] = tokenizer.eos_token_id
    config['forced_bos_token_id'] = 250004
    config['forced_eos_token_id'] = tokenizer.eos_token_id
    hf_wav2vec.config = SpeechEncoderDecoderConfig.from_dict(config)
    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
    feature_extractor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_yaml_path""", default=None, type=str, help="""Path to yaml file of fine-tuned model""")
parser.add_argument(
"""--encoder_config_path""",
default="""facebook/wav2vec2-xls-r-1b""",
type=str,
help="""Path to hf encoder wav2vec2 checkpoint config""",
)
parser.add_argument(
"""--decoder_config_path""",
default="""facebook/mbart-large-50-one-to-many-mmt""",
type=str,
help="""Path to hf decoder checkpoint config""",
)
parser.add_argument("""--add_adapter""", default=True, type=bool, help="""whethere to add model adapter layers""")
parser.add_argument("""--adapter_stride""", default=2, type=int, help="""stride of adapter layers""")
parser.add_argument("""--adapter_kernel_size""", default=3, type=int, help="""kernel size of adapter layers""")
parser.add_argument("""--encoder_output_dim""", default=10_24, type=int, help="""encoder output dim""")
parser.add_argument("""--start_token_id""", default=25_00_04, type=int, help="""`decoder_start_token_id` of model config""")
args = parser.parse_args()
convert_wav2vec2_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
args.config_yaml_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
add_adapter=args.add_adapter,
adapter_kernel_size=args.adapter_kernel_size,
adapter_stride=args.adapter_stride,
decoder_start_token_id=args.start_token_id,
encoder_output_dim=args.encoder_output_dim,
)
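# Illustrative invocation (a sketch; the script file name and all paths are placeholders):
#   python convert_wav2vec2_mbart_checkpoint.py \
#       --checkpoint_path ./checkpoint_best.pt \
#       --pytorch_dump_folder_path ./wav2vec2-mbart50 \
#       --dict_path ./dict.mbart50.txt \
#       --config_yaml_path ./config.yaml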
| 176 | 1 |
from math import factorial
class Dual:
    '''Dual number for forward-mode automatic differentiation.'''

    def __init__(self, real, rank):
        self.real = real
        if isinstance(rank, int):
            self.duals = [1] * rank
        else:
            self.duals = rank
    def __repr__(self):
        return (
            f'{self.real}+'
            f'{"+".join(str(dual) + "E" + str(n + 1) for n, dual in enumerate(self.duals))}'
        )
    def reduce(self):
        cur = self.duals.copy()
        while cur[-1] == 0:
            cur.pop(-1)
        return Dual(self.real, cur)
    def __add__(self, other):
        if not isinstance(other, Dual):
            return Dual(self.real + other, self.duals)
        s_dual = self.duals.copy()
        o_dual = other.duals.copy()
        if len(s_dual) > len(o_dual):
            o_dual.extend([1] * (len(s_dual) - len(o_dual)))
        elif len(s_dual) < len(o_dual):
            s_dual.extend([1] * (len(o_dual) - len(s_dual)))
        new_duals = []
        for i in range(len(s_dual)):
            new_duals.append(s_dual[i] + o_dual[i])
        return Dual(self.real + other.real, new_duals)

    __radd__ = __add__
    def __sub__(self, other):
        return self + other * -1
    def __mul__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i * other)
            return Dual(self.real * other, new_duals)
        new_duals = [0] * (len(self.duals) + len(other.duals) + 1)
        for i, item in enumerate(self.duals):
            for j, jtem in enumerate(other.duals):
                new_duals[i + j + 1] += item * jtem
        for k in range(len(self.duals)):
            new_duals[k] += self.duals[k] * other.real
        for index in range(len(other.duals)):
            new_duals[index] += other.duals[index] * self.real
        return Dual(self.real * other.real, new_duals)

    __rmul__ = __mul__
    def __truediv__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i / other)
            return Dual(self.real / other, new_duals)
        raise ValueError
    def __floordiv__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i // other)
            return Dual(self.real // other, new_duals)
        raise ValueError
    def __pow__(self, n):
        if n < 0 or isinstance(n, float):
            raise ValueError("power must be a positive integer")
        if n == 0:
            return 1
        if n == 1:
            return self
        x = self
        for _ in range(n - 1):
            x *= self
        return x
def differentiate(func, position, order):
    '''
    Differentiate a real-valued function at a point, to a given order, using
    forward-mode automatic differentiation on Dual numbers.
    '''
    if not callable(func):
        raise ValueError("differentiate() requires a function as input for func")
    if not isinstance(position, (float, int)):
        raise ValueError("differentiate() requires a float as input for position")
    if not isinstance(order, int):
        raise ValueError("differentiate() requires an int as input for order")
    d = Dual(position, 1)
    result = func(d)
    if order == 0:
        return result.real
    return result.duals[order - 1] * factorial(order)
if __name__ == "__main__":
import doctest
doctest.testmod()
    def f(y):
        return y**2 * y**4
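    # f(y) = y**6, so f''(y) = 30 * y**4; at y = 9 that is 30 * 9**4 = 196830,
    # which is what the call below prints.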
print(differentiate(f, 9, 2)) | 151 |
def solution(max_base: int = 10, max_power: int = 22) -> int:
    '''
    Count the integers base**power, for 1 <= base < max_base and
    1 <= power < max_power, that have exactly `power` digits
    (Project Euler problem 63).
    '''
    bases = range(1, max_base)
    powers = range(1, max_power)
    return sum(
        1 for power in powers for base in bases if len(str(base**power)) == power)
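# Example: 16807 = 7**5 has exactly five digits, so it is one of the numbers counted above.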
if __name__ == "__main__":
print(f"{solution(10, 22) = }") | 151 | 1 |
import unittest
from transformers import SPIECE_UNDERLINE, ReformerTokenizer, ReformerTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
@require_tokenizers
class ReformerTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = ReformerTokenizer
    rust_tokenizer_class = ReformerTokenizerFast
    test_rust_tokenizer = True
    test_seq2seq = False
    test_sentencepiece = True
    def setUp(self):
        super().setUp()
        tokenizer = ReformerTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        token = '<s>'
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], '<unk>')
        self.assertEqual(vocab_keys[1], '<s>')
        self.assertEqual(vocab_keys[-1], 'j')
        self.assertEqual(len(vocab_keys), 1000)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1000)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = 'I was born in 92000, and this is falsé.'
        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    def test_padding(self, max_length=15):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})"""):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                # Simple input
                s = 'This is a simple input'
                s2 = ['This is a simple input 1', 'This is a simple input 2']
                p = ('This is a simple input', 'This is a pair')
                p2 = [
                    ('This is a simple input 1', 'This is a simple input 2'),
                    ('This is a simple pair 1', 'This is a simple pair 2'),
                ]
                # Simple input tests
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding='max_length')
                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding='max_length')
                # Simple input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, s2, max_length=max_length, padding='max_length', )
                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding='max_length')
                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding='max_length')
                # Pair input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, p2, max_length=max_length, padding='max_length', )
    def test_padding_different_model_input_name(self):
        pass
    def test_full_tokenizer(self):
        tokenizer = ReformerTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokens = tokenizer.tokenize('This is a test')
        self.assertListEqual(tokens, ['▁This', '▁is', '▁a', '▁t', 'est'])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [285, 46, 10, 170, 382], )
        tokens = tokenizer.tokenize('I was born in 92000, and this is falsé.')
        self.assertListEqual(
            tokens, [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids, [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4], )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
@cached_property
    def big_tokenizer(self):
        return ReformerTokenizer.from_pretrained('google/reformer-crime-and-punishment')
@slow
    def test_tokenization_base_easy_symbols(self):
        symbols = 'Hello World!'
        original_tokenizer_encodings = [126, 32, 262, 152, 38, 72, 287]
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
@slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
            ' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'
        )
        original_tokenizer_encodings = [
108,
265,
24,
111,
4,
258,
156,
35,
28,
275,
3,
259,
297,
260,
84,
4,
35,
110,
44,
8,
259,
91,
268,
21,
11,
209,
274,
109,
266,
277,
117,
86,
93,
315,
258,
278,
258,
277,
258,
0,
258,
288,
258,
319,
258,
0,
258,
0,
258,
0,
258,
0,
258,
287,
258,
315,
258,
289,
258,
278,
99,
269,
266,
262,
8,
259,
241,
4,
217,
230,
268,
266,
55,
168,
106,
75,
193,
266,
223,
27,
49,
26,
282,
25,
264,
299,
19,
26,
0,
258,
277,
117,
86,
93,
176,
183,
270,
11,
262,
42,
61,
265,
]
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
@require_torch
@slow
    def test_torch_encode_plus_sent_to_model(self):
        import torch
        from transformers import ReformerConfig, ReformerModel
        # Build sequence
        first_ten_tokens = list(self.big_tokenizer.get_vocab().keys())[:10]
        sequence = ' '.join(first_ten_tokens)
        encoded_sequence = self.big_tokenizer.encode_plus(sequence, return_tensors='pt')
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus([sequence, sequence], return_tensors='pt')
        config = ReformerConfig()
        # The input gets padded during training so adjust the axial position encodings from the pretrained model value of (512, 1024)
        config.axial_pos_shape = encoded_sequence['input_ids'].shape
        model = ReformerModel(config)
        # Reformer has config.vocab_size == tokenizer.vocab_size == len(tokenizer) - 1 = 320; len(tokenizer) is 321 (including a pad token with id 320)
        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
        with torch.no_grad():
            model(**encoded_sequence)
            model(**batch_encoded_sequence)
@slow
    def test_tokenizer_integration(self):
        expected_encoding = {'input_ids': [[108, 265, 24, 111, 4, 258, 156, 7, 51, 279, 58, 7, 76, 25, 69, 278], [140, 243, 264, 134, 17, 267, 77, 263, 22, 262, 297, 258, 304, 177, 279, 266, 14, 89, 13, 35, 261, 299, 272, 137, 275, 278]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}  # noqa: E501
# fmt: on
# This tokenizer does not know some characters like ")".
# That is the reason why we use very simple texts here.
# Also see https://github.com/huggingface/transformers/pull/11737#issuecomment-850769064
        sequences = [
            'This is a very simple sentence.',
            'The quick brown fox jumps over the lazy dog.',
        ]
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding, model_name='google/reformer-crime-and-punishment', revision='0e6c3decb8211d49bf881013425dc8b0448b3f5a', padding=False, sequences=sequences, )
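# A minimal usage sketch for the tokenizer under test (not part of the test file;
# it downloads the pretrained vocabulary from the Hub):
#
#   tokenizer = ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment")
#   ids = tokenizer.encode("Hello World!")  # [126, 32, 262, 152, 38, 72, 287], per the test above
#   tokenizer.decode(ids)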
| 39 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class FlaxAlbertModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        config = AlbertConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, )
        return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask}
        return config, inputs_dict
@require_flax
class FlaxAlbertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
(
FlaxAlbertModel,
FlaxAlbertForPreTraining,
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
)
if is_flax_available()
else ()
)
    def setUp(self):
        self.model_tester = FlaxAlbertModelTester(self)
@slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained('''albert-base-v2''')
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxAlbertModelIntegrationTest(unittest.TestCase):
@slow
    def test_inference_no_head_absolute_embedding(self):
        model = FlaxAlbertModel.from_pretrained('''albert-base-v2''')
        input_ids = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = (1, 11, 768)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = np.array(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]])
        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
| 324 | 0 |
"""simple docstring"""
import pytest
from datasets import inspect_metric, list_metrics, load_metric
@pytest.fixture
def mock_emitted_deprecation_warnings(monkeypatch):
    monkeypatch.setattr('''datasets.utils.deprecation_utils._emitted_deprecation_warnings''', set())


# Used by list_metrics
@pytest.fixture
def mock_hfh(monkeypatch):
    class MetricMock:
        def __init__(self, metric_id):
            self.id = metric_id

    class HfhMock:
        _metrics = [MetricMock(metric_id) for metric_id in ["accuracy", "mse", "precision", "codeparrot/apps_metric"]]

        def list_metrics(self):
            return self._metrics

    monkeypatch.setattr('''datasets.inspect.huggingface_hub''', HfhMock())


@pytest.mark.parametrize(
    '''func, args''', [(load_metric, ('''metrics/mse''',)), (list_metrics, ()), (inspect_metric, ('''metrics/mse''', '''tmp_path'''))])
def test_metric_deprecation_warning(func, args, mock_emitted_deprecation_warnings, mock_hfh, tmp_path):
    if "tmp_path" in args:
        args = tuple(arg if arg != '''tmp_path''' else tmp_path for arg in args)
    with pytest.warns(FutureWarning, match='''https://huggingface.co/docs/evaluate'''):
        func(*args)
| 172 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
SWIN2SR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''caidas/swin2sr-classicalsr-x2-64''': (
'''https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json'''
),
}
class Swin2SRConfig(PretrainedConfig):
    model_type = "swin2sr"
    attribute_map = {
        "hidden_size": "embed_dim",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }
    def __init__(
        self,
        image_size=64,
        patch_size=1,
        num_channels=3,
        embed_dim=180,
        depths=[6, 6, 6, 6, 6, 6],
        num_heads=[6, 6, 6, 6, 6, 6],
        window_size=8,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        upscale=2,
        img_range=1.0,
        resi_connection="1conv",
        upsampler="pixelshuffle",
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.upscale = upscale
        self.img_range = img_range
        self.resi_connection = resi_connection
        self.upsampler = upsampler
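# A minimal usage sketch (not part of the config file; the Swin2SR model classes are
# assumed to be importable from transformers):
#
#   config = Swin2SRConfig(upscale=4)
#   config.num_layers  # 6, derived from len(depths) in __init__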
| 172 | 1 |
"""simple docstring"""
import pytest
import requests
from datasets.utils.file_utils import http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
@pytest.mark.integration
def test_offline_with_timeout():
    with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT):
        with pytest.raises(RequestWouldHangIndefinitelyError):
            requests.request('GET', 'https://huggingface.co')
        with pytest.raises(requests.exceptions.ConnectTimeout):
            requests.request('GET', 'https://huggingface.co', timeout=1.0)
@pytest.mark.integration
def test_offline_with_connection_error():
    with offline(OfflineSimulationMode.CONNECTION_FAILS):
        with pytest.raises(requests.exceptions.ConnectionError):
            requests.request('GET', 'https://huggingface.co')
def UpperCAmelCase ( ):
"""simple docstring"""
with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1 ):
        with pytest.raises(ConnectionError ):
http_head('https://huggingface.co' )
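# A minimal, self-contained sketch (illustrative, not the real implementation)
# of how a CONNECTION_FAILS-style simulation can be built: every request made
# through requests.Session is patched to raise immediately.
from contextlib import contextmanager
from unittest.mock import patch


@contextmanager
def fake_offline():
    def raise_connection_error(*args , **kwargs ):
        raise requests.ConnectionError("Offline mode is enabled." )

    with patch("requests.Session.request" , raise_connection_error ):
        yield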
| 221 | """simple docstring"""
import unittest
from parameterized import parameterized
from transformers import AutoTokenizer, GPTNeoXConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXModel,
)
class UpperCamelCase__:
def __init__( self ,__UpperCAmelCase ,__UpperCAmelCase=13 ,__UpperCAmelCase=7 ,__UpperCAmelCase=True ,__UpperCAmelCase=True ,__UpperCAmelCase=True ,__UpperCAmelCase=True ,__UpperCAmelCase=99 ,__UpperCAmelCase=64 ,__UpperCAmelCase=5 ,__UpperCAmelCase=4 ,__UpperCAmelCase=37 ,__UpperCAmelCase="gelu" ,__UpperCAmelCase=0.1 ,__UpperCAmelCase=0.1 ,__UpperCAmelCase=5_12 ,__UpperCAmelCase=16 ,__UpperCAmelCase=2 ,__UpperCAmelCase=0.0_2 ,__UpperCAmelCase=3 ,__UpperCAmelCase=4 ,__UpperCAmelCase=None ,) -> List[Any]:
A__ = parent
A__ = batch_size
A__ = seq_length
A__ = is_training
A__ = use_input_mask
A__ = use_token_type_ids
A__ = use_labels
A__ = vocab_size
A__ = hidden_size
A__ = num_hidden_layers
A__ = num_attention_heads
A__ = intermediate_size
A__ = hidden_act
A__ = hidden_dropout_prob
A__ = attention_probs_dropout_prob
A__ = max_position_embeddings
A__ = type_vocab_size
A__ = type_sequence_label_size
A__ = initializer_range
A__ = num_labels
A__ = num_choices
A__ = scope
A__ = vocab_size - 1
def snake_case__ ( self ) -> str:
A__ = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
A__ = None
if self.use_input_mask:
A__ = random_attention_mask([self.batch_size, self.seq_length] )
A__ = None
if self.use_labels:
A__ = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
A__ = self.get_config()
return config, input_ids, input_mask, token_labels
def snake_case__ ( self ) -> List[str]:
return GPTNeoXConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=__UpperCAmelCase ,initializer_range=self.initializer_range ,pad_token_id=self.pad_token_id ,)
def snake_case__ ( self ) -> List[str]:
A__ , A__ , A__ , A__ = self.prepare_config_and_inputs()
A__ = True
return config, input_ids, input_mask, token_labels
def snake_case__ ( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ) -> Union[str, Any]:
A__ = GPTNeoXModel(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
A__ = model(__UpperCAmelCase ,attention_mask=__UpperCAmelCase )
A__ = model(__UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def snake_case__ ( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ) -> Optional[Any]:
A__ = True
A__ = GPTNeoXModel(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
A__ = model(__UpperCAmelCase ,attention_mask=__UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def snake_case__ ( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ) -> Dict:
A__ = GPTNeoXForCausalLM(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
A__ = model(__UpperCAmelCase ,attention_mask=__UpperCAmelCase ,labels=__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def snake_case__ ( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ) -> Dict:
A__ = self.num_labels
A__ = GPTNeoXForQuestionAnswering(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
A__ = model(__UpperCAmelCase ,attention_mask=__UpperCAmelCase )
self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) )
def snake_case__ ( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ) -> str:
A__ = self.num_labels
A__ = GPTNeoXForSequenceClassification(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
A__ = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
A__ = model(__UpperCAmelCase ,attention_mask=__UpperCAmelCase ,labels=__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def snake_case__ ( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ) -> Dict:
A__ = self.num_labels
A__ = GPTNeoXForTokenClassification(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
A__ = model(__UpperCAmelCase ,attention_mask=__UpperCAmelCase ,labels=__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )
def snake_case__ ( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ) -> Dict:
A__ = True
A__ = GPTNeoXForCausalLM(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
# first forward pass
A__ = model(__UpperCAmelCase ,attention_mask=__UpperCAmelCase ,use_cache=__UpperCAmelCase )
A__ = outputs.past_key_values
        # create hypothetical multiple next tokens and extend to next_input_ids
A__ = ids_tensor((self.batch_size, 3) ,config.vocab_size )
A__ = ids_tensor((self.batch_size, 3) ,vocab_size=2 )
        # append to next input_ids and next attention mask
A__ = torch.cat([input_ids, next_tokens] ,dim=-1 )
A__ = torch.cat([input_mask, next_mask] ,dim=-1 )
A__ = model(__UpperCAmelCase ,attention_mask=__UpperCAmelCase ,output_hidden_states=__UpperCAmelCase )
A__ = output_from_no_past['hidden_states'][0]
A__ = model(
__UpperCAmelCase ,attention_mask=__UpperCAmelCase ,past_key_values=__UpperCAmelCase ,output_hidden_states=__UpperCAmelCase ,)['hidden_states'][0]
# select random slice
A__ = ids_tensor((1,) ,output_from_past.shape[-1] ).item()
A__ = output_from_no_past[:, -3:, random_slice_idx].detach()
A__ = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__UpperCAmelCase ,__UpperCAmelCase ,atol=1e-3 ) )
def snake_case__ ( self ) -> Dict:
A__ = self.prepare_config_and_inputs()
A__ , A__ , A__ , A__ = config_and_inputs
A__ = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class UpperCamelCase__( __A , __A , __A , unittest.TestCase ):
lowerCAmelCase__ : Optional[int] = (
(
GPTNeoXModel,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
)
if is_torch_available()
else ()
)
lowerCAmelCase__ : List[Any] = (GPTNeoXForCausalLM,) if is_torch_available() else ()
lowerCAmelCase__ : List[str] = (
{
'feature-extraction': GPTNeoXModel,
'question-answering': GPTNeoXForQuestionAnswering,
'text-classification': GPTNeoXForSequenceClassification,
'text-generation': GPTNeoXForCausalLM,
'token-classification': GPTNeoXForTokenClassification,
'zero-shot': GPTNeoXForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCAmelCase__ : Union[str, Any] = False
lowerCAmelCase__ : str = False
lowerCAmelCase__ : Any = False
lowerCAmelCase__ : str = False
def snake_case__ ( self ) -> Tuple:
A__ = GPTNeoXModelTester(self )
A__ = ConfigTester(self ,config_class=__UpperCAmelCase ,hidden_size=64 ,num_attention_heads=8 )
def snake_case__ ( self ) -> str:
self.config_tester.run_common_tests()
def snake_case__ ( self ) -> List[str]:
A__ , A__ , A__ , A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase )
def snake_case__ ( self ) -> Dict:
A__ , A__ , A__ , A__ = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase )
def snake_case__ ( self ) -> Optional[int]:
# This regression test was failing with PyTorch < 1.3
A__ , A__ , A__ , A__ = self.model_tester.prepare_config_and_inputs_for_decoder()
A__ = None
self.model_tester.create_and_check_model_as_decoder(__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase )
def snake_case__ ( self ) -> str:
A__ , A__ , A__ , A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase )
def snake_case__ ( self ) -> Optional[int]:
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_causal_lm(*__UpperCAmelCase )
def snake_case__ ( self ) -> List[str]:
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__UpperCAmelCase )
def snake_case__ ( self ) -> Any:
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__UpperCAmelCase )
def snake_case__ ( self ) -> List[Any]:
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__UpperCAmelCase )
@unittest.skip(reason='Feed forward chunking is not implemented' )
def snake_case__ ( self ) -> str:
pass
@parameterized.expand([('linear',), ('dynamic',)] )
def snake_case__ ( self ,__UpperCAmelCase ) -> Tuple:
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
A__ = ids_tensor([1, 10] ,config.vocab_size )
A__ = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] ,config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
A__ = GPTNeoXModel(__UpperCAmelCase )
original_model.to(__UpperCAmelCase )
original_model.eval()
A__ = original_model(__UpperCAmelCase ).last_hidden_state
A__ = original_model(__UpperCAmelCase ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
A__ = {'type': scaling_type, 'factor': 1_0.0}
A__ = GPTNeoXModel(__UpperCAmelCase )
scaled_model.to(__UpperCAmelCase )
scaled_model.eval()
A__ = scaled_model(__UpperCAmelCase ).last_hidden_state
A__ = scaled_model(__UpperCAmelCase ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(__UpperCAmelCase ,__UpperCAmelCase ,atol=1e-5 ) )
else:
self.assertFalse(torch.allclose(__UpperCAmelCase ,__UpperCAmelCase ,atol=1e-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(__UpperCAmelCase ,__UpperCAmelCase ,atol=1e-5 ) )
@require_torch
class UpperCamelCase__( unittest.TestCase ):
@slow
def snake_case__ ( self ) -> int:
A__ = AutoTokenizer.from_pretrained('EleutherAI/pythia-410m-deduped' )
for checkpointing in [True, False]:
A__ = GPTNeoXForCausalLM.from_pretrained('EleutherAI/pythia-410m-deduped' )
if checkpointing:
model.gradient_checkpointing_enable()
else:
model.gradient_checkpointing_disable()
model.to(__UpperCAmelCase )
A__ = tokenizer('My favorite food is' ,return_tensors='pt' ).to(__UpperCAmelCase )
# The hub repo. is updated on 2023-04-04, resulting in poor outputs.
# See: https://github.com/huggingface/transformers/pull/24193
A__ = 'My favorite food is a good old-fashioned, old-fashioned, old-fashioned.\n\nI\'m not sure'
A__ = model.generate(**__UpperCAmelCase ,do_sample=__UpperCAmelCase ,max_new_tokens=20 )
A__ = tokenizer.batch_decode(__UpperCAmelCase )[0]
self.assertEqual(__UpperCAmelCase ,__UpperCAmelCase )
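# Toy sketch of the idea behind the rope_scaling test above (illustrative math
# only; real models apply this inside their attention layers): with "linear"
# scaling the rotary position indices are divided by the factor, so a scaled
# model disagrees with the unscaled one even on short inputs, while "dynamic"
# scaling leaves inputs shorter than the original max length untouched.
def _rotary_angles(positions , dim=8 , base=10_000.0 , factor=1.0 ):
    inv_freq = 1.0 / (base ** (torch.arange(0 , dim , 2 ).float() / dim))
    return torch.outer(positions / factor , inv_freq )  # (seq_len, dim // 2)


_pos = torch.arange(10 ).float()
assert not torch.allclose(_rotary_angles(_pos ) , _rotary_angles(_pos , factor=10.0 ) )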
| 221 | 1 |
"""simple docstring"""
import argparse
import tensorflow as tf
import torch
from transformers import BertConfig, BertForMaskedLM
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertPooler,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
logging.set_verbosity_info()
def lowerCAmelCase_( lowercase_ : List[str] , lowercase_ : List[str] , lowercase_ : int ) -> Optional[Any]:
def get_masked_lm_array(lowercase_ : Dict ):
_lowerCamelCase = F"""masked_lm/{name}/.ATTRIBUTES/VARIABLE_VALUE"""
_lowerCamelCase = tf.train.load_variable(lowercase_ , lowercase_ )
if "kernel" in name:
_lowerCamelCase = array.transpose()
return torch.from_numpy(lowercase_ )
def get_encoder_array(lowercase_ : Any ):
_lowerCamelCase = F"""encoder/{name}/.ATTRIBUTES/VARIABLE_VALUE"""
_lowerCamelCase = tf.train.load_variable(lowercase_ , lowercase_ )
if "kernel" in name:
_lowerCamelCase = array.transpose()
return torch.from_numpy(lowercase_ )
def get_encoder_layer_array(lowercase_ : Optional[Any] , lowercase_ : Optional[Any] ):
_lowerCamelCase = F"""encoder/_transformer_layers/{layer_index}/{name}/.ATTRIBUTES/VARIABLE_VALUE"""
_lowerCamelCase = tf.train.load_variable(lowercase_ , lowercase_ )
if "kernel" in name:
_lowerCamelCase = array.transpose()
return torch.from_numpy(lowercase_ )
def get_encoder_attention_layer_array(lowercase_ : str , lowercase_ : Union[str, Any] , lowercase_ : Tuple ):
_lowerCamelCase = F"""encoder/_transformer_layers/{layer_index}/_attention_layer/{name}/.ATTRIBUTES/VARIABLE_VALUE"""
_lowerCamelCase = tf.train.load_variable(lowercase_ , lowercase_ )
_lowerCamelCase = array.reshape(lowercase_ )
if "kernel" in name:
_lowerCamelCase = array.transpose()
return torch.from_numpy(lowercase_ )
print(F"""Loading model based on config from {config_path}...""" )
_lowerCamelCase = BertConfig.from_json_file(lowercase_ )
_lowerCamelCase = BertForMaskedLM(lowercase_ )
# Layers
for layer_index in range(0 , config.num_hidden_layers ):
_lowerCamelCase = model.bert.encoder.layer[layer_index]
# Self-attention
_lowerCamelCase = layer.attention.self
_lowerCamelCase = get_encoder_attention_layer_array(
lowercase_ , '''_query_dense/kernel''' , self_attn.query.weight.data.shape )
_lowerCamelCase = get_encoder_attention_layer_array(
lowercase_ , '''_query_dense/bias''' , self_attn.query.bias.data.shape )
_lowerCamelCase = get_encoder_attention_layer_array(
lowercase_ , '''_key_dense/kernel''' , self_attn.key.weight.data.shape )
_lowerCamelCase = get_encoder_attention_layer_array(
lowercase_ , '''_key_dense/bias''' , self_attn.key.bias.data.shape )
_lowerCamelCase = get_encoder_attention_layer_array(
lowercase_ , '''_value_dense/kernel''' , self_attn.value.weight.data.shape )
_lowerCamelCase = get_encoder_attention_layer_array(
lowercase_ , '''_value_dense/bias''' , self_attn.value.bias.data.shape )
# Self-attention Output
_lowerCamelCase = layer.attention.output
_lowerCamelCase = get_encoder_attention_layer_array(
lowercase_ , '''_output_dense/kernel''' , self_output.dense.weight.data.shape )
_lowerCamelCase = get_encoder_attention_layer_array(
lowercase_ , '''_output_dense/bias''' , self_output.dense.bias.data.shape )
_lowerCamelCase = get_encoder_layer_array(lowercase_ , '''_attention_layer_norm/gamma''' )
_lowerCamelCase = get_encoder_layer_array(lowercase_ , '''_attention_layer_norm/beta''' )
# Intermediate
_lowerCamelCase = layer.intermediate
_lowerCamelCase = get_encoder_layer_array(lowercase_ , '''_intermediate_dense/kernel''' )
_lowerCamelCase = get_encoder_layer_array(lowercase_ , '''_intermediate_dense/bias''' )
# Output
_lowerCamelCase = layer.output
_lowerCamelCase = get_encoder_layer_array(lowercase_ , '''_output_dense/kernel''' )
_lowerCamelCase = get_encoder_layer_array(lowercase_ , '''_output_dense/bias''' )
_lowerCamelCase = get_encoder_layer_array(lowercase_ , '''_output_layer_norm/gamma''' )
_lowerCamelCase = get_encoder_layer_array(lowercase_ , '''_output_layer_norm/beta''' )
# Embeddings
_lowerCamelCase = get_encoder_array('''_position_embedding_layer/embeddings''' )
_lowerCamelCase = get_encoder_array('''_type_embedding_layer/embeddings''' )
_lowerCamelCase = get_encoder_array('''_embedding_norm_layer/gamma''' )
_lowerCamelCase = get_encoder_array('''_embedding_norm_layer/beta''' )
# LM Head
_lowerCamelCase = model.cls.predictions.transform
_lowerCamelCase = get_masked_lm_array('''dense/kernel''' )
_lowerCamelCase = get_masked_lm_array('''dense/bias''' )
_lowerCamelCase = get_masked_lm_array('''layer_norm/gamma''' )
_lowerCamelCase = get_masked_lm_array('''layer_norm/beta''' )
_lowerCamelCase = get_masked_lm_array('''embedding_table''' )
# Pooling
_lowerCamelCase = BertPooler(config=lowercase_ )
_lowerCamelCase = get_encoder_array('''_pooler_layer/kernel''' )
_lowerCamelCase = get_encoder_array('''_pooler_layer/bias''' )
# Export final model
model.save_pretrained(lowercase_ )
# Integration test - should load without any errors ;)
_lowerCamelCase = BertForMaskedLM.from_pretrained(lowercase_ )
print(new_model.eval() )
    print('''Model conversion was done successfully!''' )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : Tuple = argparse.ArgumentParser()
parser.add_argument(
'''--tf_checkpoint_path''', type=str, required=True, help='''Path to the TensorFlow Token Dropping checkpoint path.'''
)
parser.add_argument(
'''--bert_config_file''',
type=str,
required=True,
help='''The config json file corresponding to the BERT model. This specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''',
type=str,
required=True,
help='''Path to the output PyTorch model.''',
)
__SCREAMING_SNAKE_CASE : Optional[Any] = parser.parse_args()
convert_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
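    # Why the helpers above transpose "kernel" arrays (illustrative check):
    # TensorFlow stores dense kernels as (in_features, out_features), while
    # torch.nn.Linear keeps its weight as (out_features, in_features).
    import numpy as np

    _tf_kernel = np.zeros((768, 3_072) , dtype=np.float32 )   # (in, out)
    _pt_weight = torch.from_numpy(_tf_kernel.transpose() )    # (out, in)
    assert _pt_weight.shape == torch.nn.Linear(768 , 3_072 ).weight.shape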
| 366 |
"""simple docstring"""
import inspect
import unittest
from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class lowerCamelCase_( unittest.TestCase ):
'''simple docstring'''
def __init__( self , lowerCamelCase__ , lowerCamelCase__=3 , lowerCamelCase__=3_2 , lowerCamelCase__=3 , lowerCamelCase__=1_0 , lowerCamelCase__=[1_0, 2_0, 3_0, 4_0] , lowerCamelCase__=[1, 1, 2, 1] , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__="relu" , lowerCamelCase__=3 , lowerCamelCase__=None , ):
_lowerCamelCase = parent
_lowerCamelCase = batch_size
_lowerCamelCase = image_size
_lowerCamelCase = num_channels
_lowerCamelCase = embeddings_size
_lowerCamelCase = hidden_sizes
_lowerCamelCase = depths
_lowerCamelCase = is_training
_lowerCamelCase = use_labels
_lowerCamelCase = hidden_act
_lowerCamelCase = num_labels
_lowerCamelCase = scope
_lowerCamelCase = len(lowerCamelCase__ )
def snake_case__ ( self ):
_lowerCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowerCamelCase = self.get_config()
return config, pixel_values
def snake_case__ ( self ):
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ ):
_lowerCamelCase = FlaxRegNetModel(config=lowerCamelCase__ )
_lowerCamelCase = model(lowerCamelCase__ )
# Output shape (b, c, h, w)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 3_2, self.image_size // 3_2) , )
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ ):
_lowerCamelCase = self.num_labels
_lowerCamelCase = FlaxRegNetForImageClassification(config=lowerCamelCase__ )
_lowerCamelCase = model(lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def snake_case__ ( self ):
_lowerCamelCase = self.prepare_config_and_inputs()
_lowerCamelCase , _lowerCamelCase = config_and_inputs
_lowerCamelCase = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_flax
class lowerCamelCase_( A__, unittest.TestCase ):
'''simple docstring'''
lowercase__ : Union[str, Any] = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()
lowercase__ : List[Any] = False
lowercase__ : Tuple = False
lowercase__ : Union[str, Any] = False
def snake_case__ ( self ):
_lowerCamelCase = FlaxRegNetModelTester(self )
_lowerCamelCase = ConfigTester(self , config_class=lowerCamelCase__ , has_text_modality=lowerCamelCase__ )
def snake_case__ ( self ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def snake_case__ ( self ):
return
def snake_case__ ( self ):
_lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase__ )
def snake_case__ ( self ):
_lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase__ )
@unittest.skip(reason='''RegNet does not use inputs_embeds''' )
def snake_case__ ( self ):
pass
@unittest.skip(reason='''RegNet does not support input and output embeddings''' )
def snake_case__ ( self ):
pass
def snake_case__ ( self ):
_lowerCamelCase , _lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCamelCase = model_class(lowerCamelCase__ )
_lowerCamelCase = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCamelCase = [*signature.parameters.keys()]
_lowerCamelCase = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , lowerCamelCase__ )
def snake_case__ ( self ):
def check_hidden_states_output(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
_lowerCamelCase = model_class(lowerCamelCase__ )
_lowerCamelCase = model(**self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ ) )
_lowerCamelCase = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_lowerCamelCase = self.model_tester.num_stages
self.assertEqual(len(lowerCamelCase__ ) , expected_num_stages + 1 )
_lowerCamelCase , _lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCamelCase = True
check_hidden_states_output(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_lowerCamelCase = True
check_hidden_states_output(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def snake_case__ ( self ):
_lowerCamelCase , _lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
_lowerCamelCase = self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ )
_lowerCamelCase = model_class(lowerCamelCase__ )
@jax.jit
def model_jitted(lowerCamelCase__ , **lowerCamelCase__ ):
return model(pixel_values=lowerCamelCase__ , **lowerCamelCase__ )
with self.subTest('''JIT Enabled''' ):
_lowerCamelCase = model_jitted(**lowerCamelCase__ ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
_lowerCamelCase = model_jitted(**lowerCamelCase__ ).to_tuple()
self.assertEqual(len(lowerCamelCase__ ) , len(lowerCamelCase__ ) )
for jitted_output, output in zip(lowerCamelCase__ , lowerCamelCase__ ):
self.assertEqual(jitted_output.shape , output.shape )
def lowerCAmelCase_( ) -> Optional[Any]:
_lowerCamelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_flax
class lowerCamelCase_( unittest.TestCase ):
'''simple docstring'''
@cached_property
def snake_case__ ( self ):
return AutoImageProcessor.from_pretrained('''facebook/regnet-y-040''' ) if is_vision_available() else None
@slow
def snake_case__ ( self ):
_lowerCamelCase = FlaxRegNetForImageClassification.from_pretrained('''facebook/regnet-y-040''' )
_lowerCamelCase = self.default_image_processor
_lowerCamelCase = prepare_img()
_lowerCamelCase = image_processor(images=lowerCamelCase__ , return_tensors='''np''' )
_lowerCamelCase = model(**lowerCamelCase__ )
# verify the logits
_lowerCamelCase = (1, 1_0_0_0)
self.assertEqual(outputs.logits.shape , lowerCamelCase__ )
_lowerCamelCase = jnp.array([-0.4_1_8_0, -1.5_0_5_1, -3.4_8_3_6] )
self.assertTrue(jnp.allclose(outputs.logits[0, :3] , lowerCamelCase__ , atol=1e-4 ) )
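# Standalone sketch of the JIT-parity pattern exercised above: the same
# function is run compiled and uncompiled and the outputs are compared.
def _toy_forward(x ):
    return jnp.tanh(x ) * 2.0


_toy_jitted = jax.jit(_toy_forward )
_x = jnp.ones((1, 3, 32, 32) )
with jax.disable_jit():
    _eager_out = _toy_forward(_x )
assert _toy_jitted(_x ).shape == _eager_out.shape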
| 73 | 0 |
import argparse
import json
import os
import re
import shutil
import torch
from transformers import BioGptConfig, BioGptForCausalLM
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
json_indent = 2
class Dictionary :
    def __init__( self , *, # begin keyword-only arguments
        bos="<s>" , pad="<pad>" , eos="</s>" , unk="<unk>" , extra_special_symbols=None , ):
        """A mapping from symbols to consecutive integers (fairseq-style)."""
        self.bos_word , self.unk_word , self.pad_word , self.eos_word = bos, unk, pad, eos
        self.symbols = []
        self.count = []
        self.indices = {}
        self.bos_index = self.add_symbol(bos )
        self.pad_index = self.add_symbol(pad )
        self.eos_index = self.add_symbol(eos )
        self.unk_index = self.add_symbol(unk )
        if extra_special_symbols:
            for s in extra_special_symbols:
                self.add_symbol(s )
        self.nspecial = len(self.symbols )
    def __eq__( self , other ):
        """simple docstring"""
        return self.indices == other.indices
    def __getitem__( self , idx ):
        """simple docstring"""
        if idx < len(self.symbols ):
            return self.symbols[idx]
        return self.unk_word
    def __len__( self ):
        """simple docstring"""
        return len(self.symbols )
    def __contains__( self , sym ):
        """simple docstring"""
        return sym in self.indices
@classmethod
    def load ( cls , f ):
        """simple docstring"""
        d = cls()
        d.add_from_file(f )
        return d
    def add_symbol ( self , word , n=1 , overwrite=False ):
        """simple docstring"""
        if word in self.indices and not overwrite:
            idx = self.indices[word]
            self.count[idx] = self.count[idx] + n
            return idx
        else:
            idx = len(self.symbols )
            self.indices[word] = idx
            self.symbols.append(word )
            self.count.append(n )
            return idx
    def _load_meta ( self , lines ):
        """simple docstring"""
        return 0
    def add_from_file ( self , f ):
        """simple docstring"""
        if isinstance(f , str ):
            try:
                with open(f , "r" , encoding="utf-8" ) as fd:
                    self.add_from_file(fd )
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception("Incorrect encoding detected in {}, please rebuild the dataset".format(f ) )
            return
        lines = f.readlines()
        indices_start_line = self._load_meta(lines )
        for line in lines[indices_start_line:]:
            try:
                line , field = line.rstrip().rsplit(" " , 1 )
                if field == "#fairseq:overwrite":
                    overwrite = True
                    line , field = line.rsplit(" " , 1 )
                else:
                    overwrite = False
                count = int(field )
                word = line
                if word in self and not overwrite:
                    raise RuntimeError(
                        "Duplicate word found when loading Dictionary: '{}'. "
                        "Duplicate words can overwrite earlier ones by adding the "
                        "#fairseq:overwrite flag at the end of the corresponding row "
                        "in the dictionary file. If using the Camembert model, please "
                        "download an updated copy of the model file.".format(word ) )
                self.add_symbol(word , n=count , overwrite=overwrite )
            except ValueError:
                raise ValueError("Incorrect dictionary format, expected '<token> <cnt> [flags]'" )
def rewrite_dict_keys ( d ):
    '''simple docstring'''
    da = dict((re.sub(R"@@$" , "" , k ), v) if k.endswith("@@" ) else (re.sub(R"$" , "</w>" , k ), v) for k, v in d.items() )
    keep_keys = "<s> <pad> </s> <unk>".split()
    # restore the special tokens
    for k in keep_keys:
        del da[f"""{k}</w>"""]
        da[k] = d[k] # restore
    return da
def convert_biogpt_checkpoint_to_pytorch ( biogpt_checkpoint_path , pytorch_dump_folder_path ):
    '''simple docstring'''
    if not os.path.exists(biogpt_checkpoint_path ):
        raise ValueError(f"""path {biogpt_checkpoint_path} does not exist!""" )
    os.makedirs(pytorch_dump_folder_path , exist_ok=True )
    print(f"""Writing results to {pytorch_dump_folder_path}""" )
    # handle various types of models
    checkpoint_file = os.path.join(biogpt_checkpoint_path , "checkpoint.pt" )
    if not os.path.isfile(checkpoint_file ):
        raise ValueError(f"""path to the file {checkpoint_file} does not exist!""" )
    chkpt = torch.load(checkpoint_file , map_location="cpu" )
    args = chkpt["cfg"]["model"]
    # dicts
    dict_file = os.path.join(biogpt_checkpoint_path , "dict.txt" )
    if not os.path.isfile(dict_file ):
        raise ValueError(f"""path to the file {dict_file} does not exist!""" )
    src_dict = Dictionary.load(dict_file )
    src_vocab = rewrite_dict_keys(src_dict.indices )
    src_vocab_size = len(src_vocab )
    src_vocab_file = os.path.join(pytorch_dump_folder_path , VOCAB_FILES_NAMES["vocab_file"] )
    print(f"""Generating {src_vocab_file} of {src_vocab_size} records""" )
    with open(src_vocab_file , "w" , encoding="utf-8" ) as f:
        f.write(json.dumps(src_vocab , ensure_ascii=False , indent=json_indent ) )
    # merges_file (bpecodes)
    bpecodes_file = os.path.join(biogpt_checkpoint_path , "bpecodes" )
    if not os.path.isfile(bpecodes_file ):
        raise ValueError(f"""path to the file {bpecodes_file} does not exist!""" )
    merges_file = os.path.join(pytorch_dump_folder_path , VOCAB_FILES_NAMES["merges_file"] )
    shutil.copyfile(bpecodes_file , merges_file )
    # model config
    biogpt_model_config_file = os.path.join(pytorch_dump_folder_path , "config.json" )
    model_conf = {
"activation_dropout": args["activation_dropout"],
"architectures": ["BioGptForCausalLM"],
"attention_probs_dropout_prob": args["attention_dropout"],
"bos_token_id": 0,
"eos_token_id": 2,
"hidden_act": args["activation_fn"],
"hidden_dropout_prob": args["dropout"],
"hidden_size": args["decoder_embed_dim"],
"initializer_range": 0.02,
"intermediate_size": args["decoder_ffn_embed_dim"],
"layer_norm_eps": 1e-1_2,
"layerdrop": args["decoder_layerdrop"],
"max_position_embeddings": args["max_target_positions"],
"model_type": "biogpt",
"num_attention_heads": args["decoder_attention_heads"],
"num_hidden_layers": args["decoder_layers"],
"pad_token_id": 1,
"scale_embedding": not args["no_scale_embedding"],
"tie_word_embeddings": args["share_decoder_input_output_embed"],
"vocab_size": src_vocab_size,
}
# good hparam defaults to start with
print(f"""Generating {biogpt_model_config_file}""" )
with open(snake_case_ , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(snake_case_ , ensure_ascii=snake_case_ , indent=snake_case_ ) )
# tokenizer config
_UpperCAmelCase = os.path.join(snake_case_ , snake_case_ )
_UpperCAmelCase = {
"bos_token": "<s>",
"eos_token": "</s>",
"model_max_length": 1024,
"pad_token": "<pad>",
"special_tokens_map_file": None,
"tokenizer_class": "BioGptTokenizer",
"unk_token": "<unk>",
}
print(f"""Generating {biogpt_tokenizer_config_file}""" )
with open(snake_case_ , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(snake_case_ , ensure_ascii=snake_case_ , indent=snake_case_ ) )
# model
_UpperCAmelCase = chkpt["model"]
# remove unneeded keys
_UpperCAmelCase = [
"decoder.version",
]
for k in ignore_keys:
model_state_dict.pop(snake_case_ , snake_case_ )
_UpperCAmelCase = list(model_state_dict.keys() )
for layer_name in layer_names:
if layer_name.endswith("output_projection.weight" ):
_UpperCAmelCase = model_state_dict.pop(snake_case_ )
else:
_UpperCAmelCase = model_state_dict.pop(snake_case_ )
_UpperCAmelCase = BioGptConfig.from_pretrained(snake_case_ )
_UpperCAmelCase = BioGptForCausalLM(snake_case_ )
# check that it loads ok
model_new.load_state_dict(snake_case_ )
# save
_UpperCAmelCase = os.path.join(snake_case_ , snake_case_ )
print(f"""Generating {pytorch_weights_dump_path}""" )
torch.save(snake_case_ , snake_case_ )
print("Conversion is done!" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--biogpt_checkpoint_path',
default=None,
type=str,
required=True,
help=(
'Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,'
' bpecodes, etc.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
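    # Toy illustration (not part of the original script) of what
    # rewrite_dict_keys does: fairseq marks word-internal BPE pieces with a
    # trailing "@@", while the HF BioGpt tokenizer marks word-final pieces
    # with "</w>"; the markers are swapped and the four special tokens are
    # restored unchanged, e.g.
    #
    #     rewrite_dict_keys({"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3, "hel@@": 4, "lo": 5})
    #     # -> {"hel": 4, "lo</w>": 5, "<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}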
| 133 |
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
lowercase_ : Optional[int] = logging.get_logger(__name__)
lowercase_ : Dict = {
'Salesforce/codegen-350M-nl': 'https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json',
'Salesforce/codegen-350M-multi': 'https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json',
'Salesforce/codegen-350M-mono': 'https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json',
'Salesforce/codegen-2B-nl': 'https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json',
'Salesforce/codegen-2B-multi': 'https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json',
'Salesforce/codegen-2B-mono': 'https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json',
'Salesforce/codegen-6B-nl': 'https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json',
'Salesforce/codegen-6B-multi': 'https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json',
'Salesforce/codegen-6B-mono': 'https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json',
'Salesforce/codegen-16B-nl': 'https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json',
'Salesforce/codegen-16B-multi': 'https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json',
'Salesforce/codegen-16B-mono': 'https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json',
}
class CodeGenConfig ( PretrainedConfig ):
    model_type = "codegen"
    attribute_map = {
"max_position_embeddings": "n_positions",
"hidden_size": "n_embd",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
    def __init__( self , vocab_size=50_400 , n_positions=2_048 , n_ctx=2_048 , n_embd=4_096 , n_layer=28 , n_head=16 , rotary_dim=64 , n_inner=None , activation_function="gelu_new" , resid_pdrop=0.0 , embd_pdrop=0.0 , attn_pdrop=0.0 , layer_norm_epsilon=1e-5 , initializer_range=0.02 , use_cache=True , bos_token_id=50_256 , eos_token_id=50_256 , tie_word_embeddings=False , **kwargs , ):
        """simple docstring"""
        self.vocab_size = vocab_size
        self.n_ctx = n_ctx
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(
            bos_token_id=bos_token_id , eos_token_id=eos_token_id , tie_word_embeddings=tie_word_embeddings , **kwargs )
class CodeGenOnnxConfig ( OnnxConfigWithPast ):
    def __init__( self , config: PretrainedConfig , task: str = "default" , patching_specs: List[PatchingSpec] = None , use_past: bool = False , ):
        """simple docstring"""
        super().__init__(config , task=task , patching_specs=patching_specs , use_past=use_past )
        if not getattr(self._config , "pad_token_id" , None ):
            # TODO: how to do that better?
            self._config.pad_token_id = 0
@property
    def inputs ( self ):
        """simple docstring"""
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}} )
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs , direction="inputs" )
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}
        return common_inputs
@property
    def num_layers ( self ):
        """simple docstring"""
        return self._config.n_layer
@property
    def num_attention_heads ( self ):
        """simple docstring"""
        return self._config.n_head
    def generate_dummy_inputs ( self , tokenizer: PreTrainedTokenizer , batch_size: int = -1 , seq_length: int = -1 , is_pair: bool = False , framework: Optional[TensorType] = None , ):
        """simple docstring"""
        common_inputs = super(OnnxConfigWithPast , self ).generate_dummy_inputs(
            tokenizer , batch_size=batch_size , seq_length=seq_length , is_pair=is_pair , framework=framework )
        # We need to order the inputs in the way they appear in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]} )
        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
            else:
                import torch
                batch , seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape ), torch.zeros(past_shape )) for _ in range(self.num_layers )
                ]
        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch , past_key_values_length , dtype=mask_dtype )] , dim=1 )
        return ordered_inputs
@property
    def default_onnx_opset ( self ):
        """simple docstring"""
        return 13
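# Shape sketch (illustrative numbers, assumed small) for the dummy
# past_key_values built in generate_dummy_inputs above: each layer holds a
# (key, value) pair of shape (batch, num_heads, past_seq_len, head_dim), and
# the attention mask is widened to cover past + current tokens.
if is_torch_available():
    import torch

    _batch, _num_heads, _past_len, _seq_len, _head_dim = 2, 16, 9, 7, 64
    _past = [
        (torch.zeros(_batch , _num_heads , _past_len , _head_dim ), torch.zeros(_batch , _num_heads , _past_len , _head_dim ))
        for _ in range(28 )
    ]
    _mask = torch.cat([torch.ones(_batch , _seq_len ), torch.ones(_batch , _past_len )] , dim=1 )
    assert _mask.shape == (_batch, _seq_len + _past_len)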
| 133 | 1 |
from __future__ import annotations
from typing import Any
def UpperCamelCase ( postfix_notation ) -> int:
    '''simple docstring'''
    if not postfix_notation:
        return 0
    operations = {'''+''', '''-''', '''*''', '''/'''}
    stack: list[Any] = []
    for token in postfix_notation:
        if token in operations:
            b , a = stack.pop(), stack.pop()
            if token == "+":
                stack.append(a + b )
            elif token == "-":
                stack.append(a - b )
            elif token == "*":
                stack.append(a * b )
            else:
                if a * b < 0 and a % b != 0:
                    stack.append(a // b + 1 )
                else:
                    stack.append(a // b )
        else:
            stack.append(int(token ) )
    return stack.pop()
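# Worked examples (illustrative): ["2", "1", "+", "3", "*"] encodes
# (2 + 1) * 3 = 9, and division truncates toward zero via the a // b
# adjustment above, so ["4", "13", "5", "/", "+"] is 4 + trunc(13 / 5) = 6.
assert UpperCamelCase(["2", "1", "+", "3", "*"] ) == 9
assert UpperCamelCase(["4", "13", "5", "/", "+"] ) == 6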
if __name__ == "__main__":
import doctest
doctest.testmod()
| 365 |
from __future__ import annotations
from random import random
class UpperCamelCase :
'''simple docstring'''
    def __init__( self , value = None ):
        self.value = value
        self.prior = random()
        self.left: Node | None = None
        self.right: Node | None = None
def __repr__( self ):
from pprint import pformat
if self.left is None and self.right is None:
return f"'{self.value}: {self.prior:.5}'"
else:
return pformat(
{f"{self.value}: {self.prior:.5}": (self.left, self.right)} , indent=1 )
def __str__( self ):
lowercase_ :Optional[int] = str(self.value ) + ''' '''
lowercase_ :List[str] = str(self.left or '''''' )
lowercase_ :List[Any] = str(self.right or '''''' )
return value + left + right
def split( root , value ) -> tuple[Node | None, Node | None]:
    '''simple docstring'''
    if root is None: # None tree is split into 2 Nones
        return None, None
    elif root.value is None:
        return None, None
    else:
        if value < root.value:
            left , root.left = split(root.left , value )
            return left, root
        else:
            root.right , right = split(root.right , value )
            return root, right
def merge( left , right ) -> Node | None:
    '''simple docstring'''
    if (not left) or (not right): # If one node is None, return the other
        return left or right
    elif left.prior < right.prior:
        left.right = merge(left.right , right )
        return left
    else:
        right.left = merge(left , right.left )
        return right
def insert( root , value ) -> Node | None:
    '''simple docstring'''
    node = Node(value )
    left , right = split(root , value )
    return merge(merge(left , node ) , right )
def erase( root , value ) -> Node | None:
    '''simple docstring'''
    left , right = split(root , value - 1 )
    _ , right = split(right , value )
    return merge(left , right )
def inorder( root ) -> None:
'''simple docstring'''
if not root: # None
return
else:
inorder(root.left )
print(root.value , end=''',''' )
inorder(root.right )
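# Tiny illustrative demo (hypothetical helper, not in the original module):
# build a treap from a few keys and print them in sorted order.
def _demo() -> None:
    root = None
    for v in (5, 3, 8, 1):
        root = insert(root , v )
    inorder(root )  # prints 1,3,5,8,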
def interact_treap( root , args ) -> Node | None:
    '''simple docstring'''
    for arg in args.split():
        if arg[0] == "+":
            root = insert(root , int(arg[1:] ) )
        elif arg[0] == "-":
            root = erase(root , int(arg[1:] ) )
        else:
            print('''Unknown command''' )
    return root
def main( ) -> None:
    '''simple docstring'''
    root = None
    print(
        '''enter numbers to create a tree, + value to add value into treap, '''
        '''- value to erase all nodes with value. \'q\' to quit. ''' )
    args = input()
    while args != "q":
        root = interact_treap(root , args )
        print(root )
        args = input()
    print('''good bye!''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 252 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_lilt""": ["""LILT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LiltConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_lilt"""] = [
"""LILT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""LiltForQuestionAnswering""",
"""LiltForSequenceClassification""",
"""LiltForTokenClassification""",
"""LiltModel""",
"""LiltPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_lilt import LILT_PRETRAINED_CONFIG_ARCHIVE_MAP, LiltConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lilt import (
LILT_PRETRAINED_MODEL_ARCHIVE_LIST,
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
LiltPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
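# Self-contained sketch (illustrative, not the real _LazyModule) of the
# lazy-import idea used above: since PEP 562 a module can expose __getattr__,
# so heavy submodules are only imported on first attribute access.
import importlib
import types


def _make_lazy_module(name , exports ):
    module = types.ModuleType(name )

    def _lazy_getattr(attr ):
        for submodule, names in exports.items():
            if attr in names:
                return getattr(importlib.import_module(submodule ) , attr )
        raise AttributeError(f"module {name!r} has no attribute {attr!r}" )

    module.__getattr__ = _lazy_getattr  # PEP 562 hook
    return module


# e.g. _make_lazy_module("demo", {"json": ["dumps"]}).dumps({"a": 1})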
| 4 |
"""simple docstring"""
import shutil
import tempfile
import unittest
from unittest.mock import patch
from transformers import (
DefaultFlowCallback,
IntervalStrategy,
PrinterCallback,
ProgressCallback,
Trainer,
TrainerCallback,
TrainingArguments,
is_torch_available,
)
from transformers.testing_utils import require_torch
if is_torch_available():
from transformers.trainer import DEFAULT_CALLBACKS
from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel
class _UpperCAmelCase ( lowerCAmelCase__):
def __init__( self : Optional[int] ):
snake_case_ : str = []
def _snake_case ( self : List[Any] , lowercase_ : Any , lowercase_ : Union[str, Any] , lowercase_ : List[str] , **lowercase_ : Tuple ):
self.events.append('''on_init_end''' )
def _snake_case ( self : List[Any] , lowercase_ : str , lowercase_ : Optional[int] , lowercase_ : List[str] , **lowercase_ : List[str] ):
self.events.append('''on_train_begin''' )
def _snake_case ( self : Any , lowercase_ : List[str] , lowercase_ : Tuple , lowercase_ : List[Any] , **lowercase_ : Optional[int] ):
self.events.append('''on_train_end''' )
def _snake_case ( self : str , lowercase_ : Optional[int] , lowercase_ : int , lowercase_ : Optional[Any] , **lowercase_ : List[Any] ):
self.events.append('''on_epoch_begin''' )
def _snake_case ( self : Tuple , lowercase_ : List[str] , lowercase_ : Dict , lowercase_ : Union[str, Any] , **lowercase_ : Optional[Any] ):
self.events.append('''on_epoch_end''' )
def _snake_case ( self : List[str] , lowercase_ : Optional[Any] , lowercase_ : Optional[Any] , lowercase_ : int , **lowercase_ : Optional[Any] ):
self.events.append('''on_step_begin''' )
def _snake_case ( self : int , lowercase_ : int , lowercase_ : Union[str, Any] , lowercase_ : List[Any] , **lowercase_ : List[str] ):
self.events.append('''on_step_end''' )
def _snake_case ( self : str , lowercase_ : int , lowercase_ : Dict , lowercase_ : List[str] , **lowercase_ : List[str] ):
self.events.append('''on_evaluate''' )
def _snake_case ( self : Dict , lowercase_ : Union[str, Any] , lowercase_ : Any , lowercase_ : List[Any] , **lowercase_ : str ):
self.events.append('''on_predict''' )
def _snake_case ( self : List[Any] , lowercase_ : Union[str, Any] , lowercase_ : List[Any] , lowercase_ : int , **lowercase_ : Union[str, Any] ):
self.events.append('''on_save''' )
def _snake_case ( self : str , lowercase_ : Tuple , lowercase_ : Optional[int] , lowercase_ : List[str] , **lowercase_ : Any ):
self.events.append('''on_log''' )
def _snake_case ( self : Dict , lowercase_ : Optional[int] , lowercase_ : List[str] , lowercase_ : Union[str, Any] , **lowercase_ : Optional[int] ):
self.events.append('''on_prediction_step''' )
@require_torch
class _UpperCAmelCase ( unittest.TestCase):
def _snake_case ( self : List[str] ):
snake_case_ : Tuple = tempfile.mkdtemp()
def _snake_case ( self : Tuple ):
shutil.rmtree(self.output_dir )
def _snake_case ( self : int , lowercase_ : Union[str, Any]=0 , lowercase_ : Dict=0 , lowercase_ : List[str]=64 , lowercase_ : Union[str, Any]=64 , lowercase_ : Union[str, Any]=None , lowercase_ : Any=False , **lowercase_ : List[Any] ):
# disable_tqdm in TrainingArguments has a flaky default since it depends on the level of logging. We make sure
        # it's set to False since the tests later on depend on its value.
snake_case_ : int = RegressionDataset(length=lowercase_ )
snake_case_ : Any = RegressionDataset(length=lowercase_ )
snake_case_ : int = RegressionModelConfig(a=lowercase_ , b=lowercase_ )
snake_case_ : Tuple = RegressionPreTrainedModel(lowercase_ )
snake_case_ : Any = TrainingArguments(self.output_dir , disable_tqdm=lowercase_ , report_to=[] , **lowercase_ )
return Trainer(
lowercase_ , lowercase_ , train_dataset=lowercase_ , eval_dataset=lowercase_ , callbacks=lowercase_ , )
def _snake_case ( self : Optional[int] , lowercase_ : Any , lowercase_ : List[Any] ):
self.assertEqual(len(lowercase_ ) , len(lowercase_ ) )
# Order doesn't matter
snake_case_ : Any = sorted(lowercase_ , key=lambda lowercase_ : cb.__name__ if isinstance(lowercase_ , lowercase_ ) else cb.__class__.__name__ )
snake_case_ : List[str] = sorted(lowercase_ , key=lambda lowercase_ : cb.__name__ if isinstance(lowercase_ , lowercase_ ) else cb.__class__.__name__ )
for cba, cba in zip(lowercase_ , lowercase_ ):
if isinstance(lowercase_ , lowercase_ ) and isinstance(lowercase_ , lowercase_ ):
self.assertEqual(lowercase_ , lowercase_ )
elif isinstance(lowercase_ , lowercase_ ) and not isinstance(lowercase_ , lowercase_ ):
self.assertEqual(lowercase_ , cba.__class__ )
elif not isinstance(lowercase_ , lowercase_ ) and isinstance(lowercase_ , lowercase_ ):
self.assertEqual(cba.__class__ , lowercase_ )
else:
self.assertEqual(lowercase_ , lowercase_ )
def _snake_case ( self : Optional[Any] , lowercase_ : Tuple ):
snake_case_ : Tuple = ['''on_init_end''', '''on_train_begin''']
snake_case_ : List[Any] = 0
snake_case_ : Union[str, Any] = len(trainer.get_eval_dataloader() )
snake_case_ : List[Any] = ['''on_prediction_step'''] * len(trainer.get_eval_dataloader() ) + ['''on_log''', '''on_evaluate''']
for _ in range(trainer.state.num_train_epochs ):
expected_events.append('''on_epoch_begin''' )
for _ in range(lowercase_ ):
step += 1
expected_events += ["on_step_begin", "on_step_end"]
if step % trainer.args.logging_steps == 0:
expected_events.append('''on_log''' )
if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0:
expected_events += evaluation_events.copy()
if step % trainer.args.save_steps == 0:
expected_events.append('''on_save''' )
expected_events.append('''on_epoch_end''' )
if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH:
expected_events += evaluation_events.copy()
expected_events += ["on_log", "on_train_end"]
return expected_events
def _snake_case ( self : List[str] ):
snake_case_ : Union[str, Any] = self.get_trainer()
snake_case_ : Dict = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
self.check_callbacks_equality(trainer.callback_handler.callbacks , lowercase_ )
# Callbacks passed at init are added to the default callbacks
snake_case_ : Optional[Any] = self.get_trainer(callbacks=[MyTestTrainerCallback] )
expected_callbacks.append(lowercase_ )
self.check_callbacks_equality(trainer.callback_handler.callbacks , lowercase_ )
# TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback
snake_case_ : Optional[int] = self.get_trainer(disable_tqdm=lowercase_ )
snake_case_ : List[Any] = DEFAULT_CALLBACKS.copy() + [PrinterCallback]
self.check_callbacks_equality(trainer.callback_handler.callbacks , lowercase_ )
def _snake_case ( self : int ):
snake_case_ : int = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
snake_case_ : List[Any] = self.get_trainer()
# We can add, pop, or remove by class name
trainer.remove_callback(lowercase_ )
expected_callbacks.remove(lowercase_ )
self.check_callbacks_equality(trainer.callback_handler.callbacks , lowercase_ )
snake_case_ : Dict = self.get_trainer()
snake_case_ : Optional[int] = trainer.pop_callback(lowercase_ )
self.assertEqual(cb.__class__ , lowercase_ )
self.check_callbacks_equality(trainer.callback_handler.callbacks , lowercase_ )
trainer.add_callback(lowercase_ )
expected_callbacks.insert(0 , lowercase_ )
self.check_callbacks_equality(trainer.callback_handler.callbacks , lowercase_ )
# We can also add, pop, or remove by instance
snake_case_ : Optional[int] = self.get_trainer()
snake_case_ : List[Any] = trainer.callback_handler.callbacks[0]
trainer.remove_callback(lowercase_ )
expected_callbacks.remove(lowercase_ )
self.check_callbacks_equality(trainer.callback_handler.callbacks , lowercase_ )
snake_case_ : List[Any] = self.get_trainer()
snake_case_ : Optional[int] = trainer.callback_handler.callbacks[0]
snake_case_ : Optional[Any] = trainer.pop_callback(lowercase_ )
self.assertEqual(lowercase_ , lowercase_ )
self.check_callbacks_equality(trainer.callback_handler.callbacks , lowercase_ )
trainer.add_callback(lowercase_ )
expected_callbacks.insert(0 , lowercase_ )
self.check_callbacks_equality(trainer.callback_handler.callbacks , lowercase_ )
def _snake_case ( self : List[Any] ):
import warnings
# XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested
warnings.simplefilter(action='''ignore''' , category=lowercase_ )
snake_case_ : int = self.get_trainer(callbacks=[MyTestTrainerCallback] )
trainer.train()
snake_case_ : Union[str, Any] = trainer.callback_handler.callbacks[-2].events
self.assertEqual(lowercase_ , self.get_expected_events(lowercase_ ) )
# Independent log/save/eval
snake_case_ : int = self.get_trainer(callbacks=[MyTestTrainerCallback] , logging_steps=5 )
trainer.train()
snake_case_ : str = trainer.callback_handler.callbacks[-2].events
self.assertEqual(lowercase_ , self.get_expected_events(lowercase_ ) )
snake_case_ : List[Any] = self.get_trainer(callbacks=[MyTestTrainerCallback] , save_steps=5 )
trainer.train()
snake_case_ : int = trainer.callback_handler.callbacks[-2].events
self.assertEqual(lowercase_ , self.get_expected_events(lowercase_ ) )
snake_case_ : List[Any] = self.get_trainer(callbacks=[MyTestTrainerCallback] , eval_steps=5 , evaluation_strategy='''steps''' )
trainer.train()
snake_case_ : Union[str, Any] = trainer.callback_handler.callbacks[-2].events
self.assertEqual(lowercase_ , self.get_expected_events(lowercase_ ) )
snake_case_ : Union[str, Any] = self.get_trainer(callbacks=[MyTestTrainerCallback] , evaluation_strategy='''epoch''' )
trainer.train()
snake_case_ : Dict = trainer.callback_handler.callbacks[-2].events
self.assertEqual(lowercase_ , self.get_expected_events(lowercase_ ) )
# A bit of everything
snake_case_ : str = self.get_trainer(
callbacks=[MyTestTrainerCallback] , logging_steps=3 , save_steps=10 , eval_steps=5 , evaluation_strategy='''steps''' , )
trainer.train()
snake_case_ : str = trainer.callback_handler.callbacks[-2].events
self.assertEqual(lowercase_ , self.get_expected_events(lowercase_ ) )
# warning should be emitted for duplicated callbacks
with patch('''transformers.trainer_callback.logger.warning''' ) as warn_mock:
snake_case_ : Dict = self.get_trainer(
callbacks=[MyTestTrainerCallback, MyTestTrainerCallback] , )
assert str(lowercase_ ) in warn_mock.call_args[0][0]
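# Minimal illustrative custom callback (a sketch against the same
# TrainerCallback API exercised above): stop training once a step budget is
# reached by flipping should_training_stop on the TrainerControl object.
class StopAfterNSteps(TrainerCallback ):
    def __init__(self , max_steps=10 ):
        self.max_steps = max_steps

    def on_step_end(self , args , state , control , **kwargs ):
        if state.global_step >= self.max_steps:
            control.should_training_stop = True
        return control


# Usage sketch: Trainer(model, training_args, callbacks=[StopAfterNSteps(5)])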
| 264 | 0 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPanoramaPipeline,
UNetaDConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
@skip_mps
class __UpperCamelCase ( a__ , a__ , unittest.TestCase ):
lowerCamelCase : Dict =StableDiffusionPanoramaPipeline
lowerCamelCase : Any =TEXT_TO_IMAGE_PARAMS
lowerCamelCase : Tuple =TEXT_TO_IMAGE_BATCH_PARAMS
lowerCamelCase : Any =TEXT_TO_IMAGE_IMAGE_PARAMS
lowerCamelCase : Union[str, Any] =TEXT_TO_IMAGE_IMAGE_PARAMS
def __a ( self ) -> int:
torch.manual_seed(0 )
a : int = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=1 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , )
a : int = DDIMScheduler()
torch.manual_seed(0 )
a : str = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
torch.manual_seed(0 )
a : List[str] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
a : Optional[int] = CLIPTextModel(lowerCAmelCase__ )
a : Optional[int] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
a : Optional[int] = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"safety_checker": None,
"feature_extractor": None,
}
return components
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__=0 ) -> Optional[Any]:
a : int = torch.manual_seed(lowerCAmelCase__ )
a : str = {
"prompt": "a photo of the dolomites",
"generator": generator,
# Setting height and width to None to prevent OOMs on CPU.
"height": None,
"width": None,
"num_inference_steps": 1,
"guidance_scale": 6.0,
"output_type": "numpy",
}
return inputs
def __a ( self ) -> int:
a : str = "cpu" # ensure determinism for the device-dependent torch.Generator
a : str = self.get_dummy_components()
a : List[Any] = StableDiffusionPanoramaPipeline(**lowerCAmelCase__ )
a : List[Any] = sd_pipe.to(lowerCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
a : Tuple = self.get_dummy_inputs(lowerCAmelCase__ )
a : int = sd_pipe(**lowerCAmelCase__ ).images
a : Dict = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
a : str = np.array([0.6_186, 0.5_374, 0.4_915, 0.4_135, 0.4_114, 0.4_563, 0.5_128, 0.4_977, 0.4_757] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def __a ( self ) -> Optional[int]:
super().test_inference_batch_consistent(batch_sizes=[1, 2] )
def __a ( self ) -> Optional[int]:
super().test_inference_batch_single_identical(batch_size=2 , expected_max_diff=3.25E-3 )
def __a ( self ) -> List[Any]:
a : str = "cpu" # ensure determinism for the device-dependent torch.Generator
a : Dict = self.get_dummy_components()
a : str = StableDiffusionPanoramaPipeline(**lowerCAmelCase__ )
a : Tuple = sd_pipe.to(lowerCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
a : Tuple = self.get_dummy_inputs(lowerCAmelCase__ )
a : Optional[int] = "french fries"
a : Any = sd_pipe(**lowerCAmelCase__ , negative_prompt=lowerCAmelCase__ )
a : str = output.images
a : List[str] = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
a : Dict = np.array([0.6_187, 0.5_375, 0.4_915, 0.4_136, 0.4_114, 0.4_563, 0.5_128, 0.4_976, 0.4_757] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def __a ( self ) -> Any:
a : List[str] = "cpu" # ensure determinism for the device-dependent torch.Generator
a : str = self.get_dummy_components()
a : Optional[Any] = StableDiffusionPanoramaPipeline(**lowerCAmelCase__ )
a : int = sd_pipe.to(lowerCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
a : Union[str, Any] = self.get_dummy_inputs(lowerCAmelCase__ )
a : List[str] = sd_pipe(**lowerCAmelCase__ , view_batch_size=2 )
a : Any = output.images
a : Optional[int] = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
a : Dict = np.array([0.6_187, 0.5_375, 0.4_915, 0.4_136, 0.4_114, 0.4_563, 0.5_128, 0.4_976, 0.4_757] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def __a ( self ) -> Union[str, Any]:
a : Optional[Any] = "cpu" # ensure determinism for the device-dependent torch.Generator
a : Tuple = self.get_dummy_components()
a : List[Any] = EulerAncestralDiscreteScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule="scaled_linear" )
a : List[str] = StableDiffusionPanoramaPipeline(**lowerCAmelCase__ )
a : int = sd_pipe.to(lowerCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
a : Union[str, Any] = self.get_dummy_inputs(lowerCAmelCase__ )
a : str = sd_pipe(**lowerCAmelCase__ ).images
a : int = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
a : str = np.array([0.4_024, 0.6_510, 0.4_901, 0.5_378, 0.5_813, 0.5_622, 0.4_795, 0.4_467, 0.4_952] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def __a ( self ) -> Any:
a : Any = "cpu" # ensure determinism for the device-dependent torch.Generator
a : int = self.get_dummy_components()
a : List[Any] = PNDMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule="scaled_linear" , skip_prk_steps=lowerCAmelCase__ )
a : Union[str, Any] = StableDiffusionPanoramaPipeline(**lowerCAmelCase__ )
a : Any = sd_pipe.to(lowerCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
a : int = self.get_dummy_inputs(lowerCAmelCase__ )
a : Optional[Any] = sd_pipe(**lowerCAmelCase__ ).images
a : Tuple = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
a : Tuple = np.array([0.6_391, 0.6_291, 0.4_861, 0.5_134, 0.5_552, 0.4_578, 0.5_032, 0.5_023, 0.4_539] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class __UpperCamelCase ( unittest.TestCase ):
def __a ( self ) -> str:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __a ( self , lowerCAmelCase__=0 ) -> int:
a : List[Any] = torch.manual_seed(lowerCAmelCase__ )
a : Dict = {
"prompt": "a photo of the dolomites",
"generator": generator,
"num_inference_steps": 3,
"guidance_scale": 7.5,
"output_type": "numpy",
}
return inputs
def __a ( self ) -> List[str]:
a : Any = "stabilityai/stable-diffusion-2-base"
a : List[Any] = DDIMScheduler.from_pretrained(lowerCAmelCase__ , subfolder="scheduler" )
a : List[Any] = StableDiffusionPanoramaPipeline.from_pretrained(lowerCAmelCase__ , scheduler=lowerCAmelCase__ , safety_checker=lowerCAmelCase__ )
pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
pipe.enable_attention_slicing()
a : List[str] = self.get_inputs()
a : Optional[int] = pipe(**lowerCAmelCase__ ).images
a : Optional[Any] = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 2048, 3)
a : Any = np.array(
[
0.36_968_392,
0.27_025_372,
0.32_446_766,
0.28_379_387,
0.36_363_274,
0.30_733_347,
0.27_100_027,
0.27_054_125,
0.25_536_096,
] )
assert np.abs(expected_slice - image_slice ).max() < 1E-2
def __a ( self ) -> List[Any]:
a : List[Any] = StableDiffusionPanoramaPipeline.from_pretrained(
"stabilityai/stable-diffusion-2-base" , safety_checker=lowerCAmelCase__ )
a : Any = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
pipe.enable_attention_slicing()
a : Optional[Any] = self.get_inputs()
a : Dict = pipe(**lowerCAmelCase__ ).images
a : Union[str, Any] = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 2048, 3)
a : List[Any] = np.array(
[
[
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
]
] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def __a ( self ) -> Dict:
a : Tuple = 0
        def callback_fn(step , timestep , latents ) -> None:
            callback_fn.has_been_called = True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 1:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 256)
                latents_slice = latents[0, -3:, -3:, -1]
a : Union[str, Any] = np.array(
[
0.18_681_869,
0.33_907_816,
0.5_361_276,
0.14_432_865,
-0.02_856_611,
-0.73_941_123,
0.23_397_987,
0.47_322_682,
-0.37_823_164,
] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
            elif step == 2:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 256)
                latents_slice = latents[0, -3:, -3:, -1]
a : str = np.array(
[
0.18_539_645,
0.33_987_248,
0.5_378_559,
0.14_437_142,
-0.02_455_261,
-0.7_338_317,
0.23_990_755,
0.47_356_272,
-0.3_786_505,
] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
        callback_fn.has_been_called = False
a : Optional[int] = "stabilityai/stable-diffusion-2-base"
a : Dict = DDIMScheduler.from_pretrained(lowerCAmelCase__ , subfolder="scheduler" )
a : Optional[int] = StableDiffusionPanoramaPipeline.from_pretrained(lowerCAmelCase__ , scheduler=lowerCAmelCase__ , safety_checker=lowerCAmelCase__ )
a : Optional[int] = pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
pipe.enable_attention_slicing()
a : Union[str, Any] = self.get_inputs()
pipe(**lowerCAmelCase__ , callback=lowerCAmelCase__ , callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
def __a ( self ) -> int:
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
a : Tuple = "stabilityai/stable-diffusion-2-base"
a : Optional[int] = DDIMScheduler.from_pretrained(lowerCAmelCase__ , subfolder="scheduler" )
a : int = StableDiffusionPanoramaPipeline.from_pretrained(lowerCAmelCase__ , scheduler=lowerCAmelCase__ , safety_checker=lowerCAmelCase__ )
a : Dict = pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
a : str = self.get_inputs()
a : Optional[int] = pipe(**lowerCAmelCase__ )
a : Optional[int] = torch.cuda.max_memory_allocated()
        # make sure that less than 5.5 GB is allocated
assert mem_bytes < 5.5 * 10**9
| 79 |
"""simple docstring"""
import math
def solution( n : int = 100 ) -> int:
    '''simple docstring'''
    sum_of_squares = sum(i * i for i in range(1 , n + 1 ) )
    square_of_sum = int(math.pow(sum(range(1 , n + 1 ) ) , 2 ) )
    return square_of_sum - sum_of_squares
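# Worked check (illustrative): for n = 10, the sum of squares is 385 and the
# square of the sum is 55**2 = 3025, so the function returns 3025 - 385 = 2640.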
if __name__ == "__main__":
print(F'''{solution() = }''')
| 79 | 1 |
import numpy as np
def lowerCamelCase__ ( vector : np.ndarray , alpha : float ):
    '''simple docstring'''
    return np.where(vector > 0 , vector , (alpha * (np.exp(vector ) - 1)) )
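# Illustrative values (assuming alpha = 0.3): positive inputs pass through
# unchanged, while a negative input such as -2.0 maps to
# 0.3 * (exp(-2.0) - 1) ~= -0.2594.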
if __name__ == "__main__":
import doctest
doctest.testmod()
| 12 | """simple docstring"""
from . import __version__
# Backward compatibility imports, to make sure all those objects can be found in file_utils
from .utils import (
CLOUDFRONT_DISTRIB_PREFIX,
CONFIG_NAME,
DISABLE_TELEMETRY,
DUMMY_INPUTS,
DUMMY_MASK,
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
FEATURE_EXTRACTOR_NAME,
FLAX_WEIGHTS_NAME,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
MODEL_CARD_NAME,
MULTIPLE_CHOICE_DUMMY_INPUTS,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
SENTENCEPIECE_UNDERLINE,
SPIECE_UNDERLINE,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
TORCH_FX_REQUIRED_VERSION,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
USE_JAX,
USE_TF,
USE_TORCH,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
ContextManagers,
DummyObject,
EntryNotFoundError,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
TensorType,
_LazyModule,
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
cached_property,
copy_func,
default_cache_path,
define_sagemaker_information,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
get_torch_version,
has_file,
http_user_agent,
is_apex_available,
is_bsa_available,
is_coloredlogs_available,
is_datasets_available,
is_detectrona_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_librosa_available,
is_offline_mode,
is_onnx_available,
is_pandas_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_tensor,
is_tensorflow_probability_available,
is_tfaonnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bfaa_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_tfaa_available,
is_torch_tpu_available,
is_torchaudio_available,
is_training_run_on_sagemaker,
is_vision_available,
replace_return_docstrings,
requires_backends,
to_numpy,
to_py_obj,
torch_only_method,
)
| 77 | 0 |
'''simple docstring'''
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uinta
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
_UpperCAmelCase : List[Any] = imread(r"""digital_image_processing/image_data/lena_small.jpg""")
_UpperCAmelCase : Optional[Any] = cvtColor(img, COLOR_BGR2GRAY)
def __magic_name__( ):
__lowerCAmelCase = cn.convert_to_negative(lowerCamelCase)
    # assert that the negative_img array contains at least one True value
assert negative_img.any()
def __magic_name__( ):
with Image.open('''digital_image_processing/image_data/lena_small.jpg''') as img:
# Work around assertion for response
assert str(cc.change_contrast(lowerCamelCase, 1_1_0)).startswith(
'''<PIL.Image.Image image mode=RGB size=100x100 at''')
def __magic_name__( ):
__lowerCAmelCase = canny.gen_gaussian_kernel(9, sigma=1.4)
# Assert ambiguous array
assert resp.all()
def __magic_name__( ):
__lowerCAmelCase = imread('''digital_image_processing/image_data/lena_small.jpg''', 0)
# assert ambiguous array for all == True
assert canny_img.all()
__lowerCAmelCase = canny.canny(lowerCamelCase)
# assert canny array for at least one True
assert canny_array.any()
def __magic_name__( ):
assert gg.gaussian_filter(lowerCamelCase, 5, sigma=0.9).all()
def __magic_name__( ):
# laplace diagonals
__lowerCAmelCase = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]])
__lowerCAmelCase = conv.img_convolve(lowerCamelCase, lowerCamelCase).astype(lowerCamelCase)
assert res.any()
def __magic_name__( ):
assert med.median_filter(lowerCamelCase, 3).any()
def __magic_name__( ):
__lowerCAmelCase , __lowerCAmelCase = sob.sobel_filter(lowerCamelCase)
assert grad.any() and theta.any()
def __magic_name__( ):
__lowerCAmelCase = sp.make_sepia(lowerCamelCase, 2_0)
assert sepia.all()
def __magic_name__( lowerCamelCase = "digital_image_processing/image_data/lena_small.jpg"):
__lowerCAmelCase = bs.Burkes(imread(lowerCamelCase, 1), 1_2_0)
burkes.process()
assert burkes.output_img.any()
def __magic_name__( lowerCamelCase = "digital_image_processing/image_data/lena_small.jpg", ):
__lowerCAmelCase = rs.NearestNeighbour(imread(lowerCamelCase, 1), 4_0_0, 2_0_0)
nn.process()
assert nn.output.any()
def __magic_name__( ):
__lowerCAmelCase = '''digital_image_processing/image_data/lena.jpg'''
# Reading the image and converting it to grayscale.
__lowerCAmelCase = imread(lowerCamelCase, 0)
# Test for get_neighbors_pixel function() return not None
__lowerCAmelCase = 0
__lowerCAmelCase = 0
__lowerCAmelCase = image[x_coordinate][y_coordinate]
__lowerCAmelCase = lbp.get_neighbors_pixel(
lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase)
assert neighbors_pixels is not None
# Test for local_binary_pattern function()
# Create a numpy array as the same height and width of read image
__lowerCAmelCase = np.zeros((image.shape[0], image.shape[1]))
# Iterating through the image and calculating the local binary pattern value
# for each pixel.
for i in range(0, image.shape[0]):
for j in range(0, image.shape[1]):
__lowerCAmelCase = lbp.local_binary_value(lowerCamelCase, lowerCamelCase, lowerCamelCase)
assert lbp_image.any()
| 370 |
'''simple docstring'''
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
_UpperCAmelCase : Dict = """true"""
def __magic_name__( lowerCamelCase, lowerCamelCase=8_2, lowerCamelCase=1_6):
set_seed(4_2)
__lowerCAmelCase = RegressionModel()
__lowerCAmelCase = deepcopy(lowerCamelCase)
__lowerCAmelCase = RegressionDataset(length=lowerCamelCase)
__lowerCAmelCase = DataLoader(lowerCamelCase, batch_size=lowerCamelCase)
model.to(accelerator.device)
__lowerCAmelCase , __lowerCAmelCase = accelerator.prepare(lowerCamelCase, lowerCamelCase)
return model, ddp_model, dataloader
def __magic_name__( lowerCamelCase, lowerCamelCase=False):
__lowerCAmelCase = AutoTokenizer.from_pretrained('''hf-internal-testing/mrpc-bert-base-cased''')
__lowerCAmelCase = load_dataset('''glue''', '''mrpc''', split='''validation''')
def tokenize_function(lowerCamelCase):
__lowerCAmelCase = tokenizer(examples['''sentence1'''], examples['''sentence2'''], truncation=lowerCamelCase, max_length=lowerCamelCase)
return outputs
with accelerator.main_process_first():
__lowerCAmelCase = dataset.map(
lowerCamelCase, batched=lowerCamelCase, remove_columns=['''idx''', '''sentence1''', '''sentence2'''], )
__lowerCAmelCase = tokenized_datasets.rename_column('''label''', '''labels''')
def collate_fn(lowerCamelCase):
if use_longest:
return tokenizer.pad(lowerCamelCase, padding='''longest''', return_tensors='''pt''')
return tokenizer.pad(lowerCamelCase, padding='''max_length''', max_length=1_2_8, return_tensors='''pt''')
return DataLoader(lowerCamelCase, shuffle=lowerCamelCase, collate_fn=lowerCamelCase, batch_size=1_6)
def __magic_name__( lowerCamelCase, lowerCamelCase):
__lowerCAmelCase = Accelerator(dispatch_batches=lowerCamelCase, split_batches=lowerCamelCase)
__lowerCAmelCase = get_dataloader(lowerCamelCase, not dispatch_batches)
__lowerCAmelCase = AutoModelForSequenceClassification.from_pretrained(
'''hf-internal-testing/mrpc-bert-base-cased''', return_dict=lowerCamelCase)
__lowerCAmelCase , __lowerCAmelCase = accelerator.prepare(lowerCamelCase, lowerCamelCase)
return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def __magic_name__( lowerCamelCase, lowerCamelCase, lowerCamelCase):
__lowerCAmelCase = []
for batch in dataloader:
__lowerCAmelCase , __lowerCAmelCase = batch.values()
with torch.no_grad():
__lowerCAmelCase = model(lowerCamelCase)
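        # gather_for_metrics gathers results across processes and drops the samples
        # that were duplicated to pad the last batch, so each example is counted once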
__lowerCAmelCase , __lowerCAmelCase = accelerator.gather_for_metrics((logit, target))
logits_and_targets.append((logit, target))
__lowerCAmelCase , __lowerCAmelCase = [], []
for logit, targ in logits_and_targets:
logits.append(lowerCamelCase)
targs.append(lowerCamelCase)
__lowerCAmelCase , __lowerCAmelCase = torch.cat(lowerCamelCase), torch.cat(lowerCamelCase)
return logits, targs
def __magic_name__( lowerCamelCase, lowerCamelCase=8_2, lowerCamelCase=False, lowerCamelCase=False, lowerCamelCase=1_6):
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = get_basic_setup(lowerCamelCase, lowerCamelCase, lowerCamelCase)
__lowerCAmelCase , __lowerCAmelCase = generate_predictions(lowerCamelCase, lowerCamelCase, lowerCamelCase)
assert (
len(lowerCamelCase) == num_samples
), F"""Unexpected number of inputs:\n Expected: {num_samples}\n Actual: {len(lowerCamelCase)}"""
def __magic_name__( lowerCamelCase = False, lowerCamelCase = False):
__lowerCAmelCase = evaluate.load('''glue''', '''mrpc''')
__lowerCAmelCase , __lowerCAmelCase = get_mrpc_setup(lowerCamelCase, lowerCamelCase)
# First do baseline
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = setup['''no''']
model.to(lowerCamelCase)
model.eval()
for batch in dataloader:
batch.to(lowerCamelCase)
with torch.inference_mode():
__lowerCAmelCase = model(**lowerCamelCase)
__lowerCAmelCase = outputs.logits.argmax(dim=-1)
metric.add_batch(predictions=lowerCamelCase, references=batch['''labels'''])
__lowerCAmelCase = metric.compute()
# Then do distributed
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = setup['''ddp''']
model.eval()
for batch in dataloader:
with torch.inference_mode():
__lowerCAmelCase = model(**lowerCamelCase)
__lowerCAmelCase = outputs.logits.argmax(dim=-1)
__lowerCAmelCase = batch['''labels''']
__lowerCAmelCase , __lowerCAmelCase = accelerator.gather_for_metrics((preds, references))
metric.add_batch(predictions=lowerCamelCase, references=lowerCamelCase)
__lowerCAmelCase = metric.compute()
for key in "accuracy f1".split():
assert math.isclose(
baseline[key], distributed[key]), F"""Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n"""
def __magic_name__( ):
__lowerCAmelCase = Accelerator(split_batches=lowerCamelCase, dispatch_batches=lowerCamelCase)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_warning()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
    # These are a bit slower so they should only be run on the GPU or TPU
if torch.cuda.is_available() or is_tpu_available():
if accelerator.is_local_main_process:
print('''**Testing gather_for_metrics**''')
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
if accelerator.is_local_main_process:
print(F"""With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`""")
test_mrpc(lowerCamelCase, lowerCamelCase)
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print('''**Test torch metrics**''')
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
__lowerCAmelCase = Accelerator(split_batches=lowerCamelCase, dispatch_batches=lowerCamelCase)
if accelerator.is_local_main_process:
print(F"""With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99""")
test_torch_metrics(lowerCamelCase, 9_9)
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print('''**Test last batch is not dropped when perfectly divisible**''')
__lowerCAmelCase = Accelerator()
test_torch_metrics(lowerCamelCase, 5_1_2)
accelerator.state._reset_state()
def __magic_name__( lowerCamelCase):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 9 | 0 |
import math
def is_prime( number : int ) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5 , int(math.sqrt(number ) + 1 ) , 6 ):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def solution( nth : int = 10_001 ) -> int:
    try:
        nth = int(nth )
    except (TypeError, ValueError):
        raise TypeError('''Parameter nth must be int or castable to int.''' ) from None
    if nth <= 0:
        raise ValueError('''Parameter nth must be greater than or equal to one.''' )
    primes : list[int] = []
    num = 2
    while len(primes ) < nth:
        if is_prime(num ):
            primes.append(num )
            num += 1
        else:
            num += 1
    return primes[len(primes ) - 1]
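# Sanity check (known value): with the default nth = 10_001, solution() returns
# 104_743, the 10,001st prime.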
if __name__ == "__main__":
print(f"""{solution() = }""")
| 336 |
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
UpperCAmelCase__ : Optional[int] = logging.get_logger(__name__) # pylint: disable=invalid-name
UpperCAmelCase__ : Dict = '\n Examples:\n ```py\n >>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline\n >>> from diffusers.utils import load_image\n >>> import torch\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16\n ... )\n >>> pipe_prior.to("cuda")\n\n >>> prompt = "A red cartoon frog, 4k"\n >>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)\n\n >>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16\n ... )\n >>> pipe.to("cuda")\n\n >>> init_image = load_image(\n ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"\n ... "/kandinsky/frog.png"\n ... )\n\n >>> image = pipe(\n ... image=init_image,\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=100,\n ... strength=0.2,\n ... ).images\n\n >>> image[0].save("red_frog.png")\n ```\n'
def downscale_height_and_width( height , width , scale_factor=8 ):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
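# Example (illustrative): with scale_factor=8, height=768 gives 768 // 64 = 12 with
# no remainder, so the aligned size is 12 * 8 = 96; height=700 leaves a remainder,
# so it rounds up to 11 * 8 = 88.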
def prepare_image( pil_image , w=5_12 , h=5_12 ):
    pil_image = pil_image.resize((w, h) , resample=Image.BICUBIC , reducing_gap=1 )
    arr = np.array(pil_image.convert('''RGB''' ) )
    arr = arr.astype(np.float32 ) / 127.5 - 1
    arr = np.transpose(arr , [2, 0, 1] )
    image = torch.from_numpy(arr ).unsqueeze(0 )
    return image
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
def __init__( self : int , lowerCAmelCase_ : UNetaDConditionModel , lowerCAmelCase_ : DDPMScheduler , lowerCAmelCase_ : VQModel , ):
"""simple docstring"""
super().__init__()
self.register_modules(
unet=lowerCAmelCase_ , scheduler=lowerCAmelCase_ , movq=lowerCAmelCase_ , )
_A: List[Any] = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def __magic_name__ ( self : List[str] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Union[str, Any] ):
"""simple docstring"""
# get the original timestep using init_timestep
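        # e.g. num_inference_steps=100 with strength=0.3 keeps only the final 30
        # denoising steps: init_timestep=30, t_start=70, timesteps[70:]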
_A: Union[str, Any] = min(int(num_inference_steps * strength ) , lowerCAmelCase_ )
_A: str = max(num_inference_steps - init_timestep , 0 )
_A: str = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def __magic_name__ ( self : List[str] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : str , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Optional[int]=None ):
"""simple docstring"""
if not isinstance(lowerCAmelCase_ , (torch.Tensor, PIL.Image.Image, list) ):
raise ValueError(
F"""`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(lowerCAmelCase_ )}""" )
_A: Optional[int] = image.to(device=lowerCAmelCase_ , dtype=lowerCAmelCase_ )
_A: Union[str, Any] = batch_size * num_images_per_prompt
if image.shape[1] == 4:
_A: Optional[int] = image
else:
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and len(lowerCAmelCase_ ) != batch_size:
raise ValueError(
F"""You have passed a list of generators of length {len(lowerCAmelCase_ )}, but requested an effective batch"""
F""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" )
elif isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
_A: List[Any] = [
self.movq.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(lowerCAmelCase_ )
]
_A: Optional[Any] = torch.cat(lowerCAmelCase_ , dim=0 )
else:
_A: Optional[int] = self.movq.encode(lowerCAmelCase_ ).latent_dist.sample(lowerCAmelCase_ )
_A: int = self.movq.config.scaling_factor * init_latents
_A: Optional[Any] = torch.cat([init_latents] , dim=0 )
_A: Any = init_latents.shape
_A: Optional[Any] = randn_tensor(lowerCAmelCase_ , generator=lowerCAmelCase_ , device=lowerCAmelCase_ , dtype=lowerCAmelCase_ )
# get latents
_A: Union[str, Any] = self.scheduler.add_noise(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
_A: List[str] = init_latents
return latents
def __magic_name__ ( self : Optional[int] , lowerCAmelCase_ : Optional[int]=0 ):
"""simple docstring"""
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('''Please install accelerate via `pip install accelerate`''' )
_A: Any = torch.device(F"""cuda:{gpu_id}""" )
_A: int = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(lowerCAmelCase_ , lowerCAmelCase_ )
def __magic_name__ ( self : Any , lowerCAmelCase_ : Any=0 ):
"""simple docstring"""
if is_accelerate_available() and is_accelerate_version('''>=''' , '''0.17.0.dev0''' ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError('''`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.''' )
_A: Any = torch.device(F"""cuda:{gpu_id}""" )
if self.device.type != "cpu":
self.to('''cpu''' , silence_dtype_warnings=lowerCAmelCase_ )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
_A: int = None
for cpu_offloaded_model in [self.unet, self.movq]:
_A , _A: List[Any] = cpu_offload_with_hook(lowerCAmelCase_ , lowerCAmelCase_ , prev_module_hook=lowerCAmelCase_ )
# We'll offload the last model manually.
_A: Tuple = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def __magic_name__ ( self : List[Any] ):
"""simple docstring"""
if not hasattr(self.unet , '''_hf_hook''' ):
return self.device
for module in self.unet.modules():
if (
hasattr(lowerCAmelCase_ , '''_hf_hook''' )
and hasattr(module._hf_hook , '''execution_device''' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(lowerCAmelCase_ )
def __call__( self : Optional[Any] , lowerCAmelCase_ : Union[torch.FloatTensor, List[torch.FloatTensor]] , lowerCAmelCase_ : Union[torch.FloatTensor, PIL.Image.Image, List[torch.FloatTensor], List[PIL.Image.Image]] , lowerCAmelCase_ : Union[torch.FloatTensor, List[torch.FloatTensor]] , lowerCAmelCase_ : int = 5_1_2 , lowerCAmelCase_ : int = 5_1_2 , lowerCAmelCase_ : int = 1_0_0 , lowerCAmelCase_ : float = 4.0 , lowerCAmelCase_ : float = 0.3 , lowerCAmelCase_ : int = 1 , lowerCAmelCase_ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , lowerCAmelCase_ : Optional[str] = "pil" , lowerCAmelCase_ : bool = True , ):
"""simple docstring"""
_A: Any = self._execution_device
_A: Any = guidance_scale > 1.0
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
_A: Any = torch.cat(lowerCAmelCase_ , dim=0 )
_A: int = image_embeds.shape[0]
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
_A: Dict = torch.cat(lowerCAmelCase_ , dim=0 )
if do_classifier_free_guidance:
_A: Any = image_embeds.repeat_interleave(lowerCAmelCase_ , dim=0 )
_A: str = negative_image_embeds.repeat_interleave(lowerCAmelCase_ , dim=0 )
_A: Dict = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=lowerCAmelCase_ )
if not isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
_A: List[str] = [image]
if not all(isinstance(lowerCAmelCase_ , (PIL.Image.Image, torch.Tensor) ) for i in image ):
raise ValueError(
F"""Input is in incorrect format: {[type(lowerCAmelCase_ ) for i in image]}. Currently, we only support PIL image and pytorch tensor""" )
_A: List[str] = torch.cat([prepare_image(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) for i in image] , dim=0 )
_A: Tuple = image.to(dtype=image_embeds.dtype , device=lowerCAmelCase_ )
_A: Optional[Any] = self.movq.encode(lowerCAmelCase_ )['''latents''']
_A: Optional[int] = latents.repeat_interleave(lowerCAmelCase_ , dim=0 )
self.scheduler.set_timesteps(lowerCAmelCase_ , device=lowerCAmelCase_ )
_A , _A: List[Any] = self.get_timesteps(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
_A: Dict = timesteps[:1].repeat(batch_size * num_images_per_prompt )
_A , _A: Optional[int] = downscale_height_and_width(lowerCAmelCase_ , lowerCAmelCase_ , self.movq_scale_factor )
_A: Any = self.prepare_latents(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , image_embeds.dtype , lowerCAmelCase_ , lowerCAmelCase_ )
for i, t in enumerate(self.progress_bar(lowerCAmelCase_ ) ):
# expand the latents if we are doing classifier free guidance
_A: Dict = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
_A: str = {'''image_embeds''': image_embeds}
_A: Optional[int] = self.unet(
sample=lowerCAmelCase_ , timestep=lowerCAmelCase_ , encoder_hidden_states=lowerCAmelCase_ , added_cond_kwargs=lowerCAmelCase_ , return_dict=lowerCAmelCase_ , )[0]
if do_classifier_free_guidance:
_A , _A: str = noise_pred.split(latents.shape[1] , dim=1 )
_A , _A: int = noise_pred.chunk(2 )
_A , _A: int = variance_pred.chunk(2 )
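                # classifier-free guidance: move the prediction away from the
                # unconditional branch and toward the image-conditioned branch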
_A: Dict = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
_A: List[str] = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , '''variance_type''' )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
_A , _A: Optional[Any] = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
_A: Any = self.scheduler.step(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , generator=lowerCAmelCase_ , )[0]
# post-processing
_A: Tuple = self.movq.decode(lowerCAmelCase_ , force_not_quantize=lowerCAmelCase_ )['''sample''']
if output_type not in ["pt", "np", "pil"]:
raise ValueError(F"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""" )
if output_type in ["np", "pil"]:
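            # decoded images are in [-1, 1]; shift and scale them back to [0, 1]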
_A: int = image * 0.5 + 0.5
_A: Any = image.clamp(0 , 1 )
_A: Any = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
_A: Union[str, Any] = self.numpy_to_pil(lowerCAmelCase_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=lowerCAmelCase_ )
| 121 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
lowercase__ : Optional[int] = logging.get_logger(__name__)
class UpperCamelCase__ ( lowercase_, lowercase_ ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = """maskformer-swin"""
_SCREAMING_SNAKE_CASE = {
"""num_attention_heads""": """num_heads""",
"""num_hidden_layers""": """num_layers""",
}
def __init__( self : Tuple , SCREAMING_SNAKE_CASE_ : Optional[Any]=2_2_4 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=4 , SCREAMING_SNAKE_CASE_ : int=3 , SCREAMING_SNAKE_CASE_ : Dict=9_6 , SCREAMING_SNAKE_CASE_ : Optional[int]=[2, 2, 6, 2] , SCREAMING_SNAKE_CASE_ : List[Any]=[3, 6, 1_2, 2_4] , SCREAMING_SNAKE_CASE_ : List[str]=7 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=4.0 , SCREAMING_SNAKE_CASE_ : Optional[Any]=True , SCREAMING_SNAKE_CASE_ : Tuple=0.0 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=0.0 , SCREAMING_SNAKE_CASE_ : int=0.1 , SCREAMING_SNAKE_CASE_ : Union[str, Any]="gelu" , SCREAMING_SNAKE_CASE_ : Dict=False , SCREAMING_SNAKE_CASE_ : Union[str, Any]=0.02 , SCREAMING_SNAKE_CASE_ : int=1E-5 , SCREAMING_SNAKE_CASE_ : Dict=None , SCREAMING_SNAKE_CASE_ : Optional[int]=None , **SCREAMING_SNAKE_CASE_ : str , ):
super().__init__(**SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : Dict = image_size
lowerCAmelCase_ : Optional[Any] = patch_size
lowerCAmelCase_ : Optional[int] = num_channels
lowerCAmelCase_ : List[str] = embed_dim
lowerCAmelCase_ : Dict = depths
lowerCAmelCase_ : Optional[Any] = len(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : Tuple = num_heads
lowerCAmelCase_ : List[str] = window_size
lowerCAmelCase_ : Any = mlp_ratio
lowerCAmelCase_ : Any = qkv_bias
lowerCAmelCase_ : List[Any] = hidden_dropout_prob
lowerCAmelCase_ : List[Any] = attention_probs_dropout_prob
lowerCAmelCase_ : Tuple = drop_path_rate
lowerCAmelCase_ : List[str] = hidden_act
lowerCAmelCase_ : Any = use_absolute_embeddings
lowerCAmelCase_ : Optional[Any] = layer_norm_eps
lowerCAmelCase_ : str = initializer_range
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
lowerCAmelCase_ : List[str] = int(embed_dim * 2 ** (len(SCREAMING_SNAKE_CASE_ ) - 1) )
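        # e.g. embed_dim=96 with depths=[2, 2, 6, 2] gives hidden_size = 96 * 2**3 = 768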
lowerCAmelCase_ : List[Any] = ['stem'] + [F"stage{idx}" for idx in range(1 , len(SCREAMING_SNAKE_CASE_ ) + 1 )]
lowerCAmelCase_ : Tuple = get_aligned_output_features_output_indices(
out_features=SCREAMING_SNAKE_CASE_ , out_indices=SCREAMING_SNAKE_CASE_ , stage_names=self.stage_names )
| 368 |
"""simple docstring"""
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def UpperCamelCase_ ( lowerCAmelCase__ : int ) -> bool:
"""simple docstring"""
lowerCAmelCase_ : int = int(number**0.5 )
return number == sq * sq
def UpperCamelCase_ ( lowerCAmelCase__ : int , lowerCAmelCase__ : int , lowerCAmelCase__ : int , lowerCAmelCase__ : int , lowerCAmelCase__ : int , lowerCAmelCase__ : int ) -> tuple[int, int]:
"""simple docstring"""
lowerCAmelCase_ : int = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
lowerCAmelCase_ : int = x_den * y_den * z_den
lowerCAmelCase_ : int = gcd(lowerCAmelCase__ , lowerCAmelCase__ )
top //= hcf
bottom //= hcf
return top, bottom
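# Illustrative: add_three(1, 2, 1, 3, 1, 6) sums 1/2 + 1/3 + 1/6 into 36/36 and
# reduces by the gcd to (1, 1).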
def UpperCamelCase_ ( lowerCAmelCase__ : int = 35 ) -> int:
"""simple docstring"""
lowerCAmelCase_ : set = set()
lowerCAmelCase_ : int
lowerCAmelCase_ : Fraction = Fraction(0 )
lowerCAmelCase_ : tuple[int, int]
for x_num in range(1 , order + 1 ):
for x_den in range(x_num + 1 , order + 1 ):
for y_num in range(1 , order + 1 ):
for y_den in range(y_num + 1 , order + 1 ):
# n=1
lowerCAmelCase_ : str = x_num * y_den + x_den * y_num
lowerCAmelCase_ : int = x_den * y_den
lowerCAmelCase_ : int = gcd(lowerCAmelCase__ , lowerCAmelCase__ )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
lowerCAmelCase_ : List[str] = add_three(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
unique_s.add(lowerCAmelCase__ )
# n=2
lowerCAmelCase_ : Optional[int] = (
x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
)
lowerCAmelCase_ : Dict = x_den * x_den * y_den * y_den
if is_sq(lowerCAmelCase__ ) and is_sq(lowerCAmelCase__ ):
lowerCAmelCase_ : Optional[int] = int(sqrt(lowerCAmelCase__ ) )
lowerCAmelCase_ : List[str] = int(sqrt(lowerCAmelCase__ ) )
lowerCAmelCase_ : Union[str, Any] = gcd(lowerCAmelCase__ , lowerCAmelCase__ )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
lowerCAmelCase_ : Union[str, Any] = add_three(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
unique_s.add(lowerCAmelCase__ )
# n=-1
lowerCAmelCase_ : Dict = x_num * y_num
lowerCAmelCase_ : Optional[int] = x_den * y_num + x_num * y_den
lowerCAmelCase_ : Any = gcd(lowerCAmelCase__ , lowerCAmelCase__ )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
lowerCAmelCase_ : Tuple = add_three(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
unique_s.add(lowerCAmelCase__ )
                    # n=-2
lowerCAmelCase_ : List[str] = x_num * x_num * y_num * y_num
lowerCAmelCase_ : Optional[Any] = (
x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
)
if is_sq(lowerCAmelCase__ ) and is_sq(lowerCAmelCase__ ):
lowerCAmelCase_ : Tuple = int(sqrt(lowerCAmelCase__ ) )
lowerCAmelCase_ : Optional[Any] = int(sqrt(lowerCAmelCase__ ) )
lowerCAmelCase_ : Optional[int] = gcd(lowerCAmelCase__ , lowerCAmelCase__ )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
lowerCAmelCase_ : Any = add_three(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
unique_s.add(lowerCAmelCase__ )
for num, den in unique_s:
total += Fraction(lowerCAmelCase__ , lowerCAmelCase__ )
return total.denominator + total.numerator
if __name__ == "__main__":
print(f'{solution() = }')
| 289 | 0 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import MobileBertConfig, is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_MODEL_FOR_PRETRAINING_MAPPING,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertModel,
)
@require_tf
class lowerCAmelCase_ ( _a , _a , unittest.TestCase ):
'''simple docstring'''
_snake_case = (
(
TFMobileBertModel,
TFMobileBertForMaskedLM,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertForMultipleChoice,
)
if is_tf_available()
else ()
)
_snake_case = (
{
'''feature-extraction''': TFMobileBertModel,
'''fill-mask''': TFMobileBertForMaskedLM,
'''question-answering''': TFMobileBertForQuestionAnswering,
'''text-classification''': TFMobileBertForSequenceClassification,
'''token-classification''': TFMobileBertForTokenClassification,
'''zero-shot''': TFMobileBertForSequenceClassification,
}
if is_tf_available()
else {}
)
_snake_case = False
_snake_case = False
def A__ ( self , snake_case_ , snake_case_ , snake_case_=False ) -> int:
__lowerCAmelCase = super()._prepare_for_class(snake_case_ , snake_case_ , return_labels=snake_case_ )
if return_labels:
if model_class in get_values(snake_case_ ):
__lowerCAmelCase = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
return inputs_dict
class lowerCAmelCase_ ( _a ):
'''simple docstring'''
def __init__( self , snake_case_ , snake_case_=13 , snake_case_=7 , snake_case_=True , snake_case_=True , snake_case_=True , snake_case_=True , snake_case_=99 , snake_case_=32 , snake_case_=32 , snake_case_=2 , snake_case_=4 , snake_case_=37 , snake_case_="gelu" , snake_case_=0.1 , snake_case_=0.1 , snake_case_=512 , snake_case_=16 , snake_case_=2 , snake_case_=0.02 , snake_case_=3 , snake_case_=4 , snake_case_=None , ) -> int:
__lowerCAmelCase = parent
__lowerCAmelCase = batch_size
__lowerCAmelCase = seq_length
__lowerCAmelCase = is_training
__lowerCAmelCase = use_input_mask
__lowerCAmelCase = use_token_type_ids
__lowerCAmelCase = use_labels
__lowerCAmelCase = vocab_size
__lowerCAmelCase = hidden_size
__lowerCAmelCase = num_hidden_layers
__lowerCAmelCase = num_attention_heads
__lowerCAmelCase = intermediate_size
__lowerCAmelCase = hidden_act
__lowerCAmelCase = hidden_dropout_prob
__lowerCAmelCase = attention_probs_dropout_prob
__lowerCAmelCase = max_position_embeddings
__lowerCAmelCase = type_vocab_size
__lowerCAmelCase = type_sequence_label_size
__lowerCAmelCase = initializer_range
__lowerCAmelCase = num_labels
__lowerCAmelCase = num_choices
__lowerCAmelCase = scope
__lowerCAmelCase = embedding_size
def A__ ( self ) -> Dict:
__lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowerCAmelCase = None
if self.use_input_mask:
__lowerCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
__lowerCAmelCase = None
if self.use_token_type_ids:
__lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__lowerCAmelCase = None
__lowerCAmelCase = None
__lowerCAmelCase = None
if self.use_labels:
__lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowerCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
__lowerCAmelCase = MobileBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , embedding_size=self.embedding_size , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def A__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> List[Any]:
__lowerCAmelCase = TFMobileBertModel(config=snake_case_ )
__lowerCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
__lowerCAmelCase = model(snake_case_ )
__lowerCAmelCase = [input_ids, input_mask]
__lowerCAmelCase = model(snake_case_ )
__lowerCAmelCase = model(snake_case_ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def A__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> List[str]:
__lowerCAmelCase = TFMobileBertForMaskedLM(config=snake_case_ )
__lowerCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
__lowerCAmelCase = model(snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def A__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> str:
__lowerCAmelCase = TFMobileBertForNextSentencePrediction(config=snake_case_ )
__lowerCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
__lowerCAmelCase = model(snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def A__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> int:
__lowerCAmelCase = TFMobileBertForPreTraining(config=snake_case_ )
__lowerCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
__lowerCAmelCase = model(snake_case_ )
self.parent.assertEqual(
result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def A__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> List[Any]:
__lowerCAmelCase = self.num_labels
__lowerCAmelCase = TFMobileBertForSequenceClassification(config=snake_case_ )
__lowerCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
__lowerCAmelCase = model(snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def A__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> Optional[Any]:
__lowerCAmelCase = self.num_choices
__lowerCAmelCase = TFMobileBertForMultipleChoice(config=snake_case_ )
__lowerCAmelCase = tf.tile(tf.expand_dims(snake_case_ , 1 ) , (1, self.num_choices, 1) )
__lowerCAmelCase = tf.tile(tf.expand_dims(snake_case_ , 1 ) , (1, self.num_choices, 1) )
__lowerCAmelCase = tf.tile(tf.expand_dims(snake_case_ , 1 ) , (1, self.num_choices, 1) )
__lowerCAmelCase = {
'''input_ids''': multiple_choice_inputs_ids,
'''attention_mask''': multiple_choice_input_mask,
'''token_type_ids''': multiple_choice_token_type_ids,
}
__lowerCAmelCase = model(snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def A__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> str:
__lowerCAmelCase = self.num_labels
__lowerCAmelCase = TFMobileBertForTokenClassification(config=snake_case_ )
__lowerCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
__lowerCAmelCase = model(snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def A__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> Any:
__lowerCAmelCase = TFMobileBertForQuestionAnswering(config=snake_case_ )
__lowerCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
__lowerCAmelCase = model(snake_case_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def A__ ( self ) -> str:
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
def A__ ( self ) -> str:
__lowerCAmelCase = TFMobileBertModelTest.TFMobileBertModelTester(self )
__lowerCAmelCase = ConfigTester(self , config_class=snake_case_ , hidden_size=37 )
def A__ ( self ) -> List[Any]:
self.config_tester.run_common_tests()
def A__ ( self ) -> Dict:
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_model(*snake_case_ )
def A__ ( self ) -> Optional[int]:
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_masked_lm(*snake_case_ )
def A__ ( self ) -> int:
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_multiple_choice(*snake_case_ )
def A__ ( self ) -> Tuple:
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*snake_case_ )
def A__ ( self ) -> Optional[int]:
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_pretraining(*snake_case_ )
def A__ ( self ) -> int:
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_question_answering(*snake_case_ )
def A__ ( self ) -> Tuple:
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_sequence_classification(*snake_case_ )
def A__ ( self ) -> Optional[int]:
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_token_classification(*snake_case_ )
@slow
def A__ ( self ) -> Optional[int]:
for model_name in ["google/mobilebert-uncased"]:
__lowerCAmelCase = TFMobileBertModel.from_pretrained(snake_case_ )
self.assertIsNotNone(snake_case_ )
@require_tf
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def A__ ( self ) -> Tuple:
__lowerCAmelCase = TFMobileBertForPreTraining.from_pretrained("""google/mobilebert-uncased""" )
__lowerCAmelCase = tf.constant([[0, 1, 2, 3, 4, 5]] )
__lowerCAmelCase = model(snake_case_ )[0]
__lowerCAmelCase = [1, 6, 30_522]
self.assertEqual(output.shape , snake_case_ )
__lowerCAmelCase = tf.constant(
[
[
[-4.5_919_547, -9.248_295, -9.645_256],
[-6.7_306_175, -6.440_284, -6.6_052_837],
[-7.2_743_506, -6.7_847_915, -6.024_673],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , snake_case_ , atol=1e-4 )
| 301 |
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
    parser.add_argument(
        '''--txt2img_unclip''',
        default='''kakaobrain/karlo-v1-alpha''',
        type=str,
        required=False,
        help='''The pretrained txt2img unclip.''',
    )
    args = parser.parse_args()
    txtaimg = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)
    feature_extractor = CLIPImageProcessor()
    image_encoder = CLIPVisionModelWithProjection.from_pretrained('''openai/clip-vit-large-patch14''')
    imgaimg = UnCLIPImageVariationPipeline(
decoder=txtaimg.decoder,
text_encoder=txtaimg.text_encoder,
tokenizer=txtaimg.tokenizer,
text_proj=txtaimg.text_proj,
feature_extractor=feature_extractor,
image_encoder=image_encoder,
super_res_first=txtaimg.super_res_first,
super_res_last=txtaimg.super_res_last,
decoder_scheduler=txtaimg.decoder_scheduler,
super_res_scheduler=txtaimg.super_res_scheduler,
)
imgaimg.save_pretrained(args.dump_path)
| 279 | 0 |
import inspect
import unittest
from transformers import DecisionTransformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import DecisionTransformerModel
from transformers.models.decision_transformer.modeling_decision_transformer import (
DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
class __lowerCamelCase :
"""simple docstring"""
def __init__( self , UpperCAmelCase , UpperCAmelCase=13 , UpperCAmelCase=7 , UpperCAmelCase=6 , UpperCAmelCase=17 , UpperCAmelCase=23 , UpperCAmelCase=11 , UpperCAmelCase=True , ) -> Tuple:
'''simple docstring'''
lowercase_ = parent
lowercase_ = batch_size
lowercase_ = seq_length
lowercase_ = act_dim
lowercase_ = state_dim
lowercase_ = hidden_size
lowercase_ = max_length
lowercase_ = is_training
def A__ ( self ) -> Dict:
'''simple docstring'''
lowercase_ = floats_tensor((self.batch_size, self.seq_length, self.state_dim) )
lowercase_ = floats_tensor((self.batch_size, self.seq_length, self.act_dim) )
lowercase_ = floats_tensor((self.batch_size, self.seq_length, 1) )
lowercase_ = floats_tensor((self.batch_size, self.seq_length, 1) )
lowercase_ = ids_tensor((self.batch_size, self.seq_length) , vocab_size=1000 )
lowercase_ = random_attention_mask((self.batch_size, self.seq_length) )
lowercase_ = self.get_config()
return (
config,
states,
actions,
rewards,
returns_to_go,
timesteps,
attention_mask,
)
def A__ ( self ) -> Optional[int]:
'''simple docstring'''
return DecisionTransformerConfig(
batch_size=self.batch_size , seq_length=self.seq_length , act_dim=self.act_dim , state_dim=self.state_dim , hidden_size=self.hidden_size , max_length=self.max_length , )
def A__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , ) -> Optional[int]:
'''simple docstring'''
lowercase_ = DecisionTransformerModel(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
lowercase_ = model(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
self.parent.assertEqual(result.state_preds.shape , states.shape )
self.parent.assertEqual(result.action_preds.shape , actions.shape )
self.parent.assertEqual(result.return_preds.shape , returns_to_go.shape )
self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.seq_length * 3, self.hidden_size) ) # seq length *3 as there are 3 modalities: states, returns and actions
def A__ ( self ) -> Optional[Any]:
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            states,
            actions,
            rewards,
            returns_to_go,
            timesteps,
            attention_mask,
        ) = config_and_inputs
        inputs_dict = {
"states": states,
"actions": actions,
"rewards": rewards,
"returns_to_go": returns_to_go,
"timesteps": timesteps,
"attention_mask": attention_mask,
}
return config, inputs_dict
@require_torch
class __lowerCamelCase ( snake_case_ , snake_case_ , snake_case_ , unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase__ = (DecisionTransformerModel,) if is_torch_available() else ()
lowerCAmelCase__ = ()
lowerCAmelCase__ = {"feature-extraction": DecisionTransformerModel} if is_torch_available() else {}
# Ignoring of a failing test from GenerationTesterMixin, as the model does not use inputs_ids
lowerCAmelCase__ = False
# Ignoring of a failing tests from ModelTesterMixin, as the model does not implement these features
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
def A__ ( self ) -> Dict:
'''simple docstring'''
lowercase_ = DecisionTransformerModelTester(self )
lowercase_ = ConfigTester(self , config_class=UpperCAmelCase , hidden_size=37 )
    def test_config( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
    def test_model( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
@slow
    def test_model_from_pretrained( self ):
        '''simple docstring'''
        for model_name in DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DecisionTransformerModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
    def test_forward_signature( self ):
        '''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = [
                "states",
                "actions",
                "rewards",
                "returns_to_go",
                "timesteps",
                "attention_mask",
            ]
            self.assertListEqual(arg_names[: len(expected_arg_names )] , expected_arg_names )
@require_torch
class __lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
    def test_autoregressive_prediction( self ):
        '''simple docstring'''
        NUM_STEPS = 2  # number of steps of autoregressive prediction we will perform
        TARGET_RETURN = 10  # defined by the RL environment, may be normalized
        model = DecisionTransformerModel.from_pretrained("edbeeching/decision-transformer-gym-hopper-expert" )
        model = model.to(torch_device )
        config = model.config
        torch.manual_seed(0 )
        state = torch.randn(1 , 1 , config.state_dim ).to(device=torch_device , dtype=torch.float32 )  # env.reset()
        expected_outputs = torch.tensor(
            [[0.242793, -0.28693074, 0.8742613], [0.67815274, -0.08101085, -0.12952147]] , device=torch_device )
        returns_to_go = torch.tensor(TARGET_RETURN , device=torch_device , dtype=torch.float32 ).reshape(1 , 1 , 1 )
        states = state
        actions = torch.zeros(1 , 0 , config.act_dim , device=torch_device , dtype=torch.float32 )
        rewards = torch.zeros(1 , 0 , device=torch_device , dtype=torch.float32 )
        timesteps = torch.tensor(0 , device=torch_device , dtype=torch.long ).reshape(1 , 1 )
        for step in range(NUM_STEPS ):
            actions = torch.cat([actions, torch.zeros(1 , 1 , config.act_dim , device=torch_device )] , dim=1 )
            rewards = torch.cat([rewards, torch.zeros(1 , 1 , device=torch_device )] , dim=1 )
            attention_mask = torch.ones(1 , states.shape[1] ).to(dtype=torch.long , device=states.device )
            with torch.no_grad():
                _ , action_pred , _ = model(
                    states=states , actions=actions , rewards=rewards , returns_to_go=returns_to_go , timesteps=timesteps , attention_mask=attention_mask , return_dict=False , )
            self.assertEqual(action_pred.shape , actions.shape )
            self.assertTrue(torch.allclose(action_pred[0, -1] , expected_outputs[step] , atol=1e-4 ) )
            state , reward , _ , _ = (  # env.step(action)
                torch.randn(1 , 1 , config.state_dim ).to(device=torch_device , dtype=torch.float32 ),
                1.0,
                False,
                {},
            )
            actions[-1] = action_pred[0, -1]
            states = torch.cat([states, state] , dim=1 )
            pred_return = returns_to_go[0, -1] - reward
            returns_to_go = torch.cat([returns_to_go, pred_return.reshape(1 , 1 , 1 )] , dim=1 )
            timesteps = torch.cat(
                [timesteps, torch.ones((1, 1) , device=torch_device , dtype=torch.long ) * (step + 1)] , dim=1 )
| 366 |
from unittest import TestCase
from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset
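# Unit tests for Dataset.from_list: the schema is inferred from the records
# (the first record decides the columns) and the result matches Dataset.from_dict.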
class DatasetListTest( TestCase ):
"""simple docstring"""
    def _create_example_records( self ):
'''simple docstring'''
return [
{"col_1": 3, "col_2": "a"},
{"col_1": 2, "col_2": "b"},
{"col_1": 1, "col_2": "c"},
{"col_1": 0, "col_2": "d"},
]
def A__ ( self ) -> Optional[Any]:
'''simple docstring'''
        data = {"col_1": [3, 2, 1, 0], "col_2": ["a", "b", "c", "d"]}
        return Dataset.from_dict(data )
def A__ ( self ) -> Optional[int]:
'''simple docstring'''
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records )
        self.assertListEqual(dset.column_names , ["col_1", "col_2"] )
        for i, r in enumerate(dset ):
            self.assertDictEqual(r , example_records[i] )
def A__ ( self ) -> Dict:
'''simple docstring'''
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records )
        dset_from_dict = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]} )
self.assertEqual(dset.info , dset_from_dict.info )
def A__ ( self ) -> Any: # checks what happens with missing columns
'''simple docstring'''
        example_records = [{"col_1": 1}, {"col_2": "x"}]
        dset = Dataset.from_list(example_records )
self.assertDictEqual(dset[0] , {"col_1": 1} )
self.assertDictEqual(dset[1] , {"col_1": None} ) # NB: first record is used for columns
def A__ ( self ) -> List[Any]: # checks if the type can be inferred from the second record
'''simple docstring'''
        example_records = [{"col_1": []}, {"col_1": [1, 2]}]
        dset = Dataset.from_list(example_records )
self.assertEqual(dset.info.features["col_1"] , Sequence(Value("int64" ) ) )
def A__ ( self ) -> Dict:
'''simple docstring'''
        dset = Dataset.from_list([] )
        self.assertEqual(len(dset ) , 0 )
self.assertListEqual(dset.column_names , [] )
| 297 | 0 |
'''simple docstring'''
import argparse
import os
import torch
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNetaDModel,
)
TEST_UNET_CONFIG = {
"sample_size": 32,
"in_channels": 3,
"out_channels": 3,
"layers_per_block": 2,
"num_class_embeds": 1000,
"block_out_channels": [32, 64],
"attention_head_dim": 8,
"down_block_types": [
"ResnetDownsampleBlock2D",
"AttnDownBlock2D",
],
"up_block_types": [
"AttnUpBlock2D",
"ResnetUpsampleBlock2D",
],
"resnet_time_scale_shift": "scale_shift",
"upsample_type": "resnet",
"downsample_type": "resnet",
}
IMAGENET_64_UNET_CONFIG = {
"sample_size": 64,
"in_channels": 3,
"out_channels": 3,
"layers_per_block": 3,
"num_class_embeds": 1000,
"block_out_channels": [192, 192 * 2, 192 * 3, 192 * 4],
"attention_head_dim": 64,
"down_block_types": [
"ResnetDownsampleBlock2D",
"AttnDownBlock2D",
"AttnDownBlock2D",
"AttnDownBlock2D",
],
"up_block_types": [
"AttnUpBlock2D",
"AttnUpBlock2D",
"AttnUpBlock2D",
"ResnetUpsampleBlock2D",
],
"resnet_time_scale_shift": "scale_shift",
"upsample_type": "resnet",
"downsample_type": "resnet",
}
LSUN_256_UNET_CONFIG = {
"sample_size": 256,
"in_channels": 3,
"out_channels": 3,
"layers_per_block": 2,
"num_class_embeds": None,
"block_out_channels": [256, 256, 256 * 2, 256 * 2, 256 * 4, 256 * 4],
"attention_head_dim": 64,
"down_block_types": [
"ResnetDownsampleBlock2D",
"ResnetDownsampleBlock2D",
"ResnetDownsampleBlock2D",
"AttnDownBlock2D",
"AttnDownBlock2D",
"AttnDownBlock2D",
],
"up_block_types": [
"AttnUpBlock2D",
"AttnUpBlock2D",
"AttnUpBlock2D",
"ResnetUpsampleBlock2D",
"ResnetUpsampleBlock2D",
"ResnetUpsampleBlock2D",
],
"resnet_time_scale_shift": "default",
"upsample_type": "resnet",
"downsample_type": "resnet",
}
CD_SCHEDULER_CONFIG = {
"num_train_timesteps": 40,
"sigma_min": 0.002,
"sigma_max": 80.0,
}
CT_IMAGENET_64_SCHEDULER_CONFIG = {
"num_train_timesteps": 201,
"sigma_min": 0.002,
"sigma_max": 80.0,
}
CT_LSUN_256_SCHEDULER_CONFIG = {
"num_train_timesteps": 151,
"sigma_min": 0.002,
"sigma_max": 80.0,
}
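# argparse delivers flag values as strings, so the helper below maps the common
# textual spellings of a boolean ("yes"/"no", "true"/"false", ...) onto True/False
# for the --class_cond flag parsed further down.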
def strabool( v ):
    if isinstance(v , bool ):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise argparse.ArgumentTypeError('boolean value expected' )
def convert_resnet( checkpoint , new_checkpoint , old_prefix , new_prefix , has_skip=False ):
UpperCAmelCase : str = checkpoint[F"""{old_prefix}.in_layers.0.weight"""]
UpperCAmelCase : List[str] = checkpoint[F"""{old_prefix}.in_layers.0.bias"""]
UpperCAmelCase : int = checkpoint[F"""{old_prefix}.in_layers.2.weight"""]
UpperCAmelCase : int = checkpoint[F"""{old_prefix}.in_layers.2.bias"""]
UpperCAmelCase : List[str] = checkpoint[F"""{old_prefix}.emb_layers.1.weight"""]
UpperCAmelCase : Optional[int] = checkpoint[F"""{old_prefix}.emb_layers.1.bias"""]
UpperCAmelCase : str = checkpoint[F"""{old_prefix}.out_layers.0.weight"""]
UpperCAmelCase : int = checkpoint[F"""{old_prefix}.out_layers.0.bias"""]
UpperCAmelCase : str = checkpoint[F"""{old_prefix}.out_layers.3.weight"""]
UpperCAmelCase : Tuple = checkpoint[F"""{old_prefix}.out_layers.3.bias"""]
if has_skip:
UpperCAmelCase : int = checkpoint[F"""{old_prefix}.skip_connection.weight"""]
UpperCAmelCase : Optional[int] = checkpoint[F"""{old_prefix}.skip_connection.bias"""]
return new_checkpoint
def convert_attention( checkpoint , new_checkpoint , old_prefix , new_prefix , attention_dim=None ):  # last argument is unused here
    weight_q , weight_k , weight_v = checkpoint[F"""{old_prefix}.qkv.weight"""].chunk(3 , dim=0 )
    bias_q , bias_k , bias_v = checkpoint[F"""{old_prefix}.qkv.bias"""].chunk(3 , dim=0 )
UpperCAmelCase : List[Any] = checkpoint[F"""{old_prefix}.norm.weight"""]
UpperCAmelCase : Dict = checkpoint[F"""{old_prefix}.norm.bias"""]
UpperCAmelCase : Dict = weight_q.squeeze(-1 ).squeeze(-1 )
UpperCAmelCase : List[Any] = bias_q.squeeze(-1 ).squeeze(-1 )
UpperCAmelCase : Optional[Any] = weight_k.squeeze(-1 ).squeeze(-1 )
UpperCAmelCase : Any = bias_k.squeeze(-1 ).squeeze(-1 )
UpperCAmelCase : Dict = weight_v.squeeze(-1 ).squeeze(-1 )
UpperCAmelCase : int = bias_v.squeeze(-1 ).squeeze(-1 )
UpperCAmelCase : List[str] = (
checkpoint[F"""{old_prefix}.proj_out.weight"""].squeeze(-1 ).squeeze(-1 )
)
UpperCAmelCase : Tuple = checkpoint[F"""{old_prefix}.proj_out.bias"""].squeeze(-1 ).squeeze(-1 )
return new_checkpoint
def con_pt_to_diffuser( unet_path , unet_config ):
    checkpoint = torch.load(unet_path , map_location='cpu' )
    new_checkpoint = {}
UpperCAmelCase : str = checkpoint['time_embed.0.weight']
UpperCAmelCase : Dict = checkpoint['time_embed.0.bias']
UpperCAmelCase : Optional[int] = checkpoint['time_embed.2.weight']
UpperCAmelCase : str = checkpoint['time_embed.2.bias']
if unet_config["num_class_embeds"] is not None:
UpperCAmelCase : str = checkpoint['label_emb.weight']
UpperCAmelCase : Any = checkpoint['input_blocks.0.0.weight']
UpperCAmelCase : List[str] = checkpoint['input_blocks.0.0.bias']
UpperCAmelCase : Tuple = unet_config['down_block_types']
UpperCAmelCase : Union[str, Any] = unet_config['layers_per_block']
UpperCAmelCase : Dict = unet_config['attention_head_dim']
UpperCAmelCase : Optional[Any] = unet_config['block_out_channels']
UpperCAmelCase : str = 1
UpperCAmelCase : int = channels_list[0]
for i, layer_type in enumerate(UpperCAmelCase_ ):
UpperCAmelCase : Optional[Any] = channels_list[i]
UpperCAmelCase : Any = current_channels != prev_channels
if layer_type == "ResnetDownsampleBlock2D":
for j in range(UpperCAmelCase_ ):
UpperCAmelCase : Any = F"""down_blocks.{i}.resnets.{j}"""
UpperCAmelCase : Any = F"""input_blocks.{current_layer}.0"""
UpperCAmelCase : Dict = True if j == 0 and downsample_block_has_skip else False
UpperCAmelCase : List[Any] = convert_resnet(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , has_skip=UpperCAmelCase_ )
current_layer += 1
elif layer_type == "AttnDownBlock2D":
for j in range(UpperCAmelCase_ ):
UpperCAmelCase : str = F"""down_blocks.{i}.resnets.{j}"""
UpperCAmelCase : Optional[Any] = F"""input_blocks.{current_layer}.0"""
UpperCAmelCase : Tuple = True if j == 0 and downsample_block_has_skip else False
UpperCAmelCase : Tuple = convert_resnet(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , has_skip=UpperCAmelCase_ )
UpperCAmelCase : Optional[Any] = F"""down_blocks.{i}.attentions.{j}"""
UpperCAmelCase : List[Any] = F"""input_blocks.{current_layer}.1"""
UpperCAmelCase : Optional[int] = convert_attention(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
current_layer += 1
if i != len(UpperCAmelCase_ ) - 1:
UpperCAmelCase : Optional[Any] = F"""down_blocks.{i}.downsamplers.0"""
UpperCAmelCase : List[str] = F"""input_blocks.{current_layer}.0"""
UpperCAmelCase : str = convert_resnet(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
current_layer += 1
UpperCAmelCase : Tuple = current_channels
# hardcoded the mid-block for now
UpperCAmelCase : int = 'mid_block.resnets.0'
UpperCAmelCase : Tuple = 'middle_block.0'
UpperCAmelCase : str = convert_resnet(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
UpperCAmelCase : List[Any] = 'mid_block.attentions.0'
UpperCAmelCase : List[Any] = 'middle_block.1'
UpperCAmelCase : Optional[int] = convert_attention(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
UpperCAmelCase : Optional[Any] = 'mid_block.resnets.1'
UpperCAmelCase : Dict = 'middle_block.2'
UpperCAmelCase : Union[str, Any] = convert_resnet(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
UpperCAmelCase : List[str] = 0
UpperCAmelCase : int = unet_config['up_block_types']
for i, layer_type in enumerate(UpperCAmelCase_ ):
if layer_type == "ResnetUpsampleBlock2D":
for j in range(layers_per_block + 1 ):
UpperCAmelCase : int = F"""up_blocks.{i}.resnets.{j}"""
UpperCAmelCase : List[Any] = F"""output_blocks.{current_layer}.0"""
UpperCAmelCase : List[str] = convert_resnet(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , has_skip=UpperCAmelCase_ )
current_layer += 1
if i != len(UpperCAmelCase_ ) - 1:
UpperCAmelCase : Any = F"""up_blocks.{i}.upsamplers.0"""
UpperCAmelCase : int = F"""output_blocks.{current_layer-1}.1"""
UpperCAmelCase : List[Any] = convert_resnet(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
elif layer_type == "AttnUpBlock2D":
for j in range(layers_per_block + 1 ):
UpperCAmelCase : int = F"""up_blocks.{i}.resnets.{j}"""
UpperCAmelCase : int = F"""output_blocks.{current_layer}.0"""
UpperCAmelCase : Optional[int] = convert_resnet(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , has_skip=UpperCAmelCase_ )
UpperCAmelCase : Union[str, Any] = F"""up_blocks.{i}.attentions.{j}"""
UpperCAmelCase : Tuple = F"""output_blocks.{current_layer}.1"""
UpperCAmelCase : Any = convert_attention(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
current_layer += 1
if i != len(UpperCAmelCase_ ) - 1:
UpperCAmelCase : str = F"""up_blocks.{i}.upsamplers.0"""
UpperCAmelCase : Optional[Any] = F"""output_blocks.{current_layer-1}.2"""
UpperCAmelCase : Any = convert_resnet(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
UpperCAmelCase : Any = checkpoint['out.0.weight']
UpperCAmelCase : Optional[int] = checkpoint['out.0.bias']
UpperCAmelCase : Tuple = checkpoint['out.2.weight']
UpperCAmelCase : str = checkpoint['out.2.bias']
return new_checkpoint
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--unet_path", default=None, type=str, required=True, help="Path to the unet.pt to convert.")
parser.add_argument(
"--dump_path", default=None, type=str, required=True, help="Path to output the converted UNet model."
)
parser.add_argument("--class_cond", default=True, type=str, help="Whether the model is class-conditional.")
    args = parser.parse_args()
    args.class_cond = strabool(args.class_cond)
    ckpt_name = os.path.basename(args.unet_path)
print(f'''Checkpoint: {ckpt_name}''')
# Get U-Net config
if "imagenet64" in ckpt_name:
lowercase__ = IMAGENET_64_UNET_CONFIG
elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
lowercase__ = LSUN_256_UNET_CONFIG
elif "test" in ckpt_name:
lowercase__ = TEST_UNET_CONFIG
else:
raise ValueError(f'''Checkpoint type {ckpt_name} is not currently supported.''')
if not args.class_cond:
lowercase__ = None
    converted_unet_ckpt = con_pt_to_diffuser(args.unet_path, unet_config)
    image_unet = UNetaDModel(**unet_config)
    image_unet.load_state_dict(converted_unet_ckpt)
# Get scheduler config
if "cd" in ckpt_name or "test" in ckpt_name:
lowercase__ = CD_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "imagenet64" in ckpt_name:
lowercase__ = CT_IMAGENET_64_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
lowercase__ = CT_LSUN_256_SCHEDULER_CONFIG
else:
raise ValueError(f'''Checkpoint type {ckpt_name} is not currently supported.''')
lowercase__ = CMStochasticIterativeScheduler(**scheduler_config)
lowercase__ = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler)
consistency_model.save_pretrained(args.dump_path)
| 151 |
'''simple docstring'''
import numpy as np
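# Hyperbolic tangent via the logistic identity: tanh(x) = 2 / (1 + e^(-2x)) - 1,
# applied element-wise to a NumPy array by the helper below.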
def UpperCamelCase( vector ):
    return (2 / (1 + np.exp(-2 * vector ))) - 1
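# For example, the function saturates toward +/-1 away from zero: for
# np.array([-2.0, 0.0, 2.0]) it returns approximately [-0.9640, 0.0, 0.9640].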
if __name__ == "__main__":
import doctest
doctest.testmod()
| 151 | 1 |
import warnings
from ...utils import logging
from .image_processing_videomae import VideoMAEImageProcessor
UpperCamelCase = logging.get_logger(__name__)
class VideoMAEFeatureExtractor( VideoMAEImageProcessor ):
    def __init__( self , *args , **kwargs ):
        warnings.warn(
            "The class VideoMAEFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use VideoMAEImageProcessor instead." , FutureWarning , )
        super().__init__(*args , **kwargs )
| 352 |
def cocktail_shaker_sort( unsorted : list ):
    # Bidirectional bubble sort: sweep right-to-left, then left-to-right,
    # shrinking the unsorted window until a full pass makes no swap.
    for i in range(len(unsorted ) - 1 , 0 , -1 ):
        swapped = False
        for j in range(i , 0 , -1 ):
            if unsorted[j] < unsorted[j - 1]:
                unsorted[j] , unsorted[j - 1] = unsorted[j - 1] , unsorted[j]
                swapped = True
        for j in range(i ):
            if unsorted[j] > unsorted[j + 1]:
                unsorted[j] , unsorted[j + 1] = unsorted[j + 1] , unsorted[j]
                swapped = True
        if not swapped:
            break
    return unsorted
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input('''Enter numbers separated by a comma:\n''').strip()
    unsorted = [int(item) for item in user_input.split(''',''')]
print(f"{cocktail_shaker_sort(unsorted) = }")
| 333 | 0 |
"""simple docstring"""
import os
import numpy
import onnx
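# Helpers to shrink an ONNX model: initializer tensors that compare equal
# (ignoring their names) are de-duplicated, and every node that consumed a
# duplicate is re-pointed at the single surviving copy.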
def _is_equal_tensor_proto( a , b ):
    '''simple docstring'''
    name_a = a.name
    name_b = b.name
    a.name = ''
    b.name = ''
    res = a == b
    a.name = name_a
    b.name = name_b
    return res
def _node_replace_input_with( node_proto , name , new_name ):
    '''simple docstring'''
    for i, input_name in enumerate(node_proto.input ):
        if input_name == name:
            node_proto.input.insert(i , new_name )
            node_proto.input.pop(i + 1 )
    if node_proto.op_type == "If":
        _graph_replace_input_with(node_proto.attribute[0].g , name , new_name )
        _graph_replace_input_with(node_proto.attribute[1].g , name , new_name )
    if node_proto.op_type == "Loop":
        _graph_replace_input_with(node_proto.attribute[0].g , name , new_name )
def _graph_replace_input_with( graph_proto , name , new_name ):
    '''simple docstring'''
    for n in graph_proto.node:
        _node_replace_input_with(n , name , new_name )
def _remove_dup_initializers_from_model( model , model_without_ext , ind_to_replace ):
    '''simple docstring'''
    inits_with_data = list(model.graph.initializer )
    inits = list(model_without_ext.graph.initializer )
    for i, ref_i in ind_to_replace:
        assert inits_with_data[i].name == inits[i].name
        assert inits_with_data[ref_i].name == inits[ref_i].name
        assert i > ref_i
        name_i = inits[i].name
        name_ref = inits[ref_i].name
        model_without_ext.graph.initializer.remove(inits[i] )
        # for n in model.graph.node:
        _graph_replace_input_with(model_without_ext.graph , name_i , name_ref )
def remove_dup_initializers( onnx_file_path ):
    '''simple docstring'''
    model_file_folder = os.path.dirname(onnx_file_path )
    model_file_name = os.path.basename(onnx_file_path )
    model = onnx.load(os.path.join(model_file_folder , model_file_name ) )
    inits = list(model.graph.initializer )
    dup_set = set()
    dup_map = {}
    ind_to_replace = []
    total_reduced_size = 0
    for i in range(len(inits ) ):
        if i in dup_set:
            continue
        for j in range(i + 1 , len(inits ) ):
            if j in dup_set:
                continue
            if _is_equal_tensor_proto(inits[i] , inits[j] ):
                dup_set.add(i )
                dup_set.add(j )
                dtype = inits[j].data_type
                mem_size = numpy.prod(inits[j].dims )
                if dtype == 1:
                    mem_size *= 4
                elif dtype == 6:
                    mem_size *= 4
                elif dtype == 7 or dtype == 11:
                    mem_size *= 8
                else:
                    print('unexpected data type: ' , dtype )
                total_reduced_size += mem_size
                name_i = inits[i].name
                name_j = inits[j].name
                if name_i in dup_map:
                    dup_map[name_i].append(name_j )
                else:
                    dup_map[name_i] = [name_j]
                ind_to_replace.append((j, i) )
    print('total reduced size: ' , total_reduced_size / 10_24 / 10_24 / 10_24 , 'GB' )
    ind_to_replace = sorted(ind_to_replace )
    _remove_dup_initializers_from_model(model , model , ind_to_replace )
    optimized_model_file_name = 'optimized_' + model_file_name
    new_model_path = os.path.join(model_file_folder , optimized_model_file_name )
    onnx.save(model , new_model_path )
    return new_model_path
| 172 |
"""simple docstring"""
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .tokenization_wavaveca import WavaVecaCTCTokenizer
class WavaVecaProcessor( ProcessorMixin ):
    feature_extractor_class = """Wav2Vec2FeatureExtractor"""
    tokenizer_class = """AutoTokenizer"""
    def __init__(self , feature_extractor , tokenizer ):
        super().__init__(feature_extractor , tokenizer )
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
@classmethod
    def from_pretrained(cls , pretrained_model_name_or_path , **kwargs ):
        try:
            return super().from_pretrained(pretrained_model_name_or_path , **kwargs )
        except OSError:
            warnings.warn(
                f"Loading a tokenizer inside {cls.__name__} from a config that does not"
                ' include a `tokenizer_class` attribute is deprecated and will be '
                'removed in v5. Please add `\'tokenizer_class\': \'Wav2Vec2CTCTokenizer\'`'
                ' attribute to either your `config.json` or `tokenizer_config.json` '
                'file to suppress this warning: ' , FutureWarning , )
            feature_extractor = WavaVecaFeatureExtractor.from_pretrained(pretrained_model_name_or_path , **kwargs )
            tokenizer = WavaVecaCTCTokenizer.from_pretrained(pretrained_model_name_or_path , **kwargs )
            return cls(feature_extractor=feature_extractor , tokenizer=tokenizer )
    def __call__(self , *args , **kwargs ):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args , **kwargs )
        if "raw_speech" in kwargs:
            warnings.warn('Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.' )
            audio = kwargs.pop('raw_speech' )
        else:
            audio = kwargs.pop('audio' , None )
        sampling_rate = kwargs.pop('sampling_rate' , None )
        text = kwargs.pop('text' , None )
        if len(args ) > 0:
            audio = args[0]
            args = args[1:]
        if audio is None and text is None:
            raise ValueError('You need to specify either an `audio` or `text` input to process.' )
        if audio is not None:
            inputs = self.feature_extractor(audio , *args , sampling_rate=sampling_rate , **kwargs )
        if text is not None:
            encodings = self.tokenizer(text , **kwargs )
        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings['input_ids']
            return inputs
    def pad(self , *args , **kwargs ):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor.pad(*args , **kwargs )
        input_features = kwargs.pop('input_features' , None )
        labels = kwargs.pop('labels' , None )
        if len(args ) > 0:
            input_features = args[0]
            args = args[1:]
        if input_features is not None:
            input_features = self.feature_extractor.pad(input_features , *args , **kwargs )
        if labels is not None:
            labels = self.tokenizer.pad(labels , **kwargs )
        if labels is None:
            return input_features
        elif input_features is None:
            return labels
        else:
            input_features["labels"] = labels['input_ids']
            return input_features
    def batch_decode(self , *args , **kwargs ):
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode(self , *args , **kwargs ):
        return self.tokenizer.decode(*args , **kwargs )
    @contextmanager
    def as_target_processor(self ):
        warnings.warn(
            '`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '
            'labels by using the argument `text` of the regular `__call__` method (either in the same call as '
            'your audio inputs, or in a separate call.' )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
| 172 | 1 |
import argparse
import json
import subprocess
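# Queries the GitHub Actions API for the self-hosted runners of
# huggingface/transformers, records any offline target runners in
# offline_runners.txt, and raises so that CI reports the failure.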
def get_runner_status( target_runners , token ):
    """simple docstring"""
    offline_runners = []
    cmd = (
        F'''curl -H "Accept: application/vnd.github+json" -H "Authorization: Bearer {token}"'''
        ''' https://api.github.com/repos/huggingface/transformers/actions/runners'''
    )
    output = subprocess.run(cmd , shell=True , stdout=subprocess.PIPE )
    o = output.stdout.decode('''utf-8''' )
    status = json.loads(o )
    runners = status['''runners''']
    for runner in runners:
        if runner["name"] in target_runners:
            if runner["status"] == "offline":
                offline_runners.append(runner )
    # save the result so we can report them on Slack
    with open('''offline_runners.txt''' , '''w''' ) as fp:
        fp.write(json.dumps(offline_runners ) )
    if len(offline_runners ) > 0:
        failed = '''\n'''.join([x['''name'''] for x in offline_runners] )
        raise ValueError(F'''The following runners are offline:\n{failed}''' )
if __name__ == "__main__":
    def list_str( values ):
"""simple docstring"""
return values.split(''',''' )
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--target_runners''',
default=None,
type=list_str,
required=True,
help='''Comma-separated list of runners to check status.''',
)
parser.add_argument(
'''--token''', default=None, type=str, required=True, help='''A token that has actions:read permission.'''
)
    args = parser.parse_args()
get_runner_status(args.target_runners, args.token)
| 281 |
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
snake_case : str = logging.get_logger(__name__)
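# A Pipeline subclass for image classification: preprocess() loads and
# tensorizes the image, _forward() runs the model, and postprocess() applies a
# softmax and returns the top-k (label, score) pairs.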
@add_end_docstrings(PIPELINE_INIT_ARGS )
class ImageClassificationPipeline( Pipeline ):
def __init__( self , *_lowerCamelCase , **_lowerCamelCase ):
super().__init__(*_lowerCamelCase , **_lowerCamelCase )
requires_backends(self , '''vision''' )
self.check_model_type(
TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
if self.framework == '''tf'''
else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING )
    def _sanitize_parameters( self , top_k=None ):
        postprocess_params = {}
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return {}, {}, postprocess_params
def __call__( self , _lowerCamelCase , **_lowerCamelCase ):
return super().__call__(_lowerCamelCase , **_lowerCamelCase )
    def preprocess( self , image ):
        image = load_image(image )
        model_inputs = self.image_processor(images=image , return_tensors=self.framework )
        return model_inputs
    def _forward( self , model_inputs ):
        model_outputs = self.model(**model_inputs )
        return model_outputs
    def postprocess( self , model_outputs , top_k=5 ):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels
        if self.framework == "pt":
            probs = model_outputs.logits.softmax(-1 )[0]
            scores , ids = probs.topk(top_k )
        elif self.framework == "tf":
            probs = stable_softmax(model_outputs.logits , axis=-1 )[0]
            topk = tf.math.top_k(probs , k=top_k )
            scores , ids = topk.values.numpy(), topk.indices.numpy()
        else:
            raise ValueError(F'''Unsupported framework: {self.framework}''' )
        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "label": self.model.config.idalabel[_id]} for score, _id in zip(scores , ids )]
| 281 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_bigbird_pegasus''': [
'''BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''BigBirdPegasusConfig''',
'''BigBirdPegasusOnnxConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bigbird_pegasus"] = [
'''BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BigBirdPegasusForCausalLM''',
'''BigBirdPegasusForConditionalGeneration''',
'''BigBirdPegasusForQuestionAnswering''',
'''BigBirdPegasusForSequenceClassification''',
'''BigBirdPegasusModel''',
'''BigBirdPegasusPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP,
BigBirdPegasusConfig,
BigBirdPegasusOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST,
BigBirdPegasusForCausalLM,
BigBirdPegasusForConditionalGeneration,
BigBirdPegasusForQuestionAnswering,
BigBirdPegasusForSequenceClassification,
BigBirdPegasusModel,
BigBirdPegasusPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 103 |
from typing import Optional
from urllib.parse import quote
import huggingface_hub as hfh
from packaging import version
def hf_hub_url( repo_id , path , revision = None ) -> str:
    if version.parse(hfh.__version__ ).release < version.parse('0.11.0' ).release:
        # old versions of hfh don't url-encode the file path
        path = quote(path )
    return hfh.hf_hub_url(repo_id , path , repo_type='dataset' , revision=revision )
| 73 | 0 |
import functools
import gc
import inspect
import torch
from .imports import is_npu_available, is_xpu_available
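# Memory utilities: release_memory() drops references and empties the
# accelerator cache, should_reduce_batch_size() recognizes OOM-style
# RuntimeErrors, and find_executable_batch_size() wraps a function whose first
# argument is a batch size, halving it on every OOM until the call succeeds.
# Sketch of intended use:
#
#     @find_executable_batch_size(starting_batch_size=128)
#     def train(batch_size, model, data):  # batch_size is injected by the decorator
#         ...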
def release_memory(*objects ):
    if not isinstance(objects , list ):
        objects = list(objects )
    for i in range(len(objects ) ):
        objects[i] = None
    gc.collect()
    if is_xpu_available():
        torch.xpu.empty_cache()
    elif is_npu_available():
        torch.npu.empty_cache()
    else:
        torch.cuda.empty_cache()
    return objects
def should_reduce_batch_size(exception ) -> bool:
    _statements = [
        '''CUDA out of memory.''',  # CUDA OOM
        '''cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.''',  # CUDNN SNAFU
        '''DefaultCPUAllocator: can\'t allocate memory''',  # CPU OOM
    ]
    if isinstance(exception , RuntimeError ) and len(exception.args ) == 1:
        return any(err in exception.args[0] for err in _statements )
    return False
def find_executable_batch_size(function = None , starting_batch_size = 1_2_8 ):
    if function is None:
        return functools.partial(find_executable_batch_size , starting_batch_size=starting_batch_size )
    batch_size = starting_batch_size
    def decorator(*args , **kwargs ):
        nonlocal batch_size
        gc.collect()
        if is_xpu_available():
            torch.xpu.empty_cache()
        elif is_npu_available():
            torch.npu.empty_cache()
        else:
            torch.cuda.empty_cache()
        params = list(inspect.signature(function ).parameters.keys() )
        # Guard against user error
        if len(params ) < (len(args ) + 1):
            arg_str = ''', '''.join([F'''{arg}={value}''' for arg, value in zip(params[1:] , args[1:] )] )
            raise TypeError(
                F'''Batch size was passed into `{function.__name__}` as the first argument when called.'''
                F'''Remove this as the decorator already does so: `{function.__name__}({arg_str})`''' )
        while True:
            if batch_size == 0:
                raise RuntimeError('No executable batch size found, reached zero.' )
            try:
                return function(batch_size , *args , **kwargs )
            except Exception as e:
                if should_reduce_batch_size(e ):
                    gc.collect()
                    if is_xpu_available():
                        torch.xpu.empty_cache()
                    elif is_npu_available():
                        torch.npu.empty_cache()
                    else:
                        torch.cuda.empty_cache()
                    batch_size //= 2
                else:
                    raise
    return decorator
| 362 |
import os
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from huggingface_hub.file_download import http_get
from requests.exceptions import HTTPError
from transformers import (
AlbertTokenizer,
AutoTokenizer,
BertTokenizer,
BertTokenizerFast,
GPTaTokenizerFast,
is_tokenizers_available,
)
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers
from transformers.tokenization_utils import Trie
sys.path.append(str(Path(__file__).parent.parent / 'utils'))
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class _lowerCAmelCase( unittest.TestCase ):
"""simple docstring"""
def _a ( self ):
# A mock response for an HTTP head request to emulate server down
UpperCamelCase_: Any = mock.Mock()
UpperCamelCase_: Dict = 5_0_0
UpperCamelCase_: Any = {}
UpperCamelCase_: Tuple = HTTPError
UpperCamelCase_: List[str] = {}
# Download this model to make sure it's in the cache.
UpperCamelCase_: int = BertTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert' )
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch('requests.Session.request' , return_value=_lowerCamelCase ) as mock_head:
UpperCamelCase_: Optional[int] = BertTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert' )
# This check we did call the fake head request
mock_head.assert_called()
@require_tokenizers
def _a ( self ):
# A mock response for an HTTP head request to emulate server down
UpperCamelCase_: Union[str, Any] = mock.Mock()
UpperCamelCase_: Union[str, Any] = 5_0_0
UpperCamelCase_: str = {}
UpperCamelCase_: List[str] = HTTPError
UpperCamelCase_: Optional[int] = {}
# Download this model to make sure it's in the cache.
UpperCamelCase_: List[str] = GPTaTokenizerFast.from_pretrained('gpt2' )
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch('requests.Session.request' , return_value=_lowerCamelCase ) as mock_head:
UpperCamelCase_: str = GPTaTokenizerFast.from_pretrained('gpt2' )
# This check we did call the fake head request
mock_head.assert_called()
def _a ( self ):
# This test is for deprecated behavior and can be removed in v5
try:
UpperCamelCase_: Optional[int] = tempfile.mktemp()
with open(_lowerCamelCase , 'wb' ) as f:
http_get('https://huggingface.co/albert-base-v1/resolve/main/spiece.model' , _lowerCamelCase )
UpperCamelCase_: Tuple = AlbertTokenizer.from_pretrained(_lowerCamelCase )
finally:
os.remove(_lowerCamelCase )
# Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in
# the current folder and have the right name.
if os.path.isfile('tokenizer.json' ):
# We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it.
return
try:
with open('tokenizer.json' , 'wb' ) as f:
http_get('https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json' , _lowerCamelCase )
UpperCamelCase_: List[str] = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2' )
            # The tiny random BERT has a vocab size of 1024, tiny gpt2 has a vocab size of 1000
self.assertEqual(tokenizer.vocab_size , 1_0_0_0 )
# Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file.
finally:
os.remove('tokenizer.json' )
def _a ( self ):
# This test is for deprecated behavior and can be removed in v5
UpperCamelCase_: Any = AlbertTokenizer.from_pretrained('https://huggingface.co/albert-base-v1/resolve/main/spiece.model' )
@is_staging_test
class _lowerCAmelCase( unittest.TestCase ):
"""simple docstring"""
a : Dict =['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''bla''', '''blou''']
@classmethod
def _a ( cls ):
UpperCamelCase_: Optional[int] = TOKEN
HfFolder.save_token(_lowerCamelCase )
@classmethod
def _a ( cls ):
try:
delete_repo(token=cls._token , repo_id='test-tokenizer' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='valid_org/test-tokenizer-org' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='test-dynamic-tokenizer' )
except HTTPError:
pass
def _a ( self ):
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCamelCase_: List[Any] = os.path.join(_lowerCamelCase , 'vocab.txt' )
with open(_lowerCamelCase , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )
UpperCamelCase_: int = BertTokenizer(_lowerCamelCase )
tokenizer.push_to_hub('test-tokenizer' , use_auth_token=self._token )
UpperCamelCase_: Union[str, Any] = BertTokenizer.from_pretrained(f'''{USER}/test-tokenizer''' )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
# Reset repo
delete_repo(token=self._token , repo_id='test-tokenizer' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_lowerCamelCase , repo_id='test-tokenizer' , push_to_hub=_lowerCamelCase , use_auth_token=self._token )
UpperCamelCase_: List[str] = BertTokenizer.from_pretrained(f'''{USER}/test-tokenizer''' )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
def _a ( self ):
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCamelCase_: Optional[Any] = os.path.join(_lowerCamelCase , 'vocab.txt' )
with open(_lowerCamelCase , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )
UpperCamelCase_: Union[str, Any] = BertTokenizer(_lowerCamelCase )
tokenizer.push_to_hub('valid_org/test-tokenizer-org' , use_auth_token=self._token )
UpperCamelCase_: Dict = BertTokenizer.from_pretrained('valid_org/test-tokenizer-org' )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
# Reset repo
delete_repo(token=self._token , repo_id='valid_org/test-tokenizer-org' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(
_lowerCamelCase , repo_id='valid_org/test-tokenizer-org' , push_to_hub=_lowerCamelCase , use_auth_token=self._token )
UpperCamelCase_: Optional[Any] = BertTokenizer.from_pretrained('valid_org/test-tokenizer-org' )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
@require_tokenizers
def _a ( self ):
CustomTokenizer.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCamelCase_: Optional[Any] = os.path.join(_lowerCamelCase , 'vocab.txt' )
with open(_lowerCamelCase , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )
UpperCamelCase_: Optional[int] = CustomTokenizer(_lowerCamelCase )
# No fast custom tokenizer
tokenizer.push_to_hub('test-dynamic-tokenizer' , use_auth_token=self._token )
UpperCamelCase_: str = AutoTokenizer.from_pretrained(f'''{USER}/test-dynamic-tokenizer''' , trust_remote_code=_lowerCamelCase )
# Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , 'CustomTokenizer' )
# Fast and slow custom tokenizer
CustomTokenizerFast.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCamelCase_: Optional[Any] = os.path.join(_lowerCamelCase , 'vocab.txt' )
with open(_lowerCamelCase , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )
UpperCamelCase_: Dict = BertTokenizerFast.from_pretrained(_lowerCamelCase )
bert_tokenizer.save_pretrained(_lowerCamelCase )
UpperCamelCase_: List[str] = CustomTokenizerFast.from_pretrained(_lowerCamelCase )
tokenizer.push_to_hub('test-dynamic-tokenizer' , use_auth_token=self._token )
UpperCamelCase_: Tuple = AutoTokenizer.from_pretrained(f'''{USER}/test-dynamic-tokenizer''' , trust_remote_code=_lowerCamelCase )
# Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , 'CustomTokenizerFast' )
UpperCamelCase_: int = AutoTokenizer.from_pretrained(
f'''{USER}/test-dynamic-tokenizer''' , use_fast=_lowerCamelCase , trust_remote_code=_lowerCamelCase )
# Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , 'CustomTokenizer' )
class _lowerCAmelCase( unittest.TestCase ):
"""simple docstring"""
def _a ( self ):
UpperCamelCase_: Dict = Trie()
trie.add('Hello 友達' )
self.assertEqual(trie.data , {'H': {'e': {'l': {'l': {'o': {' ': {'友': {'達': {'': 1}}}}}}}}} )
trie.add('Hello' )
trie.data
self.assertEqual(trie.data , {'H': {'e': {'l': {'l': {'o': {'': 1, ' ': {'友': {'達': {'': 1}}}}}}}}} )
def _a ( self ):
UpperCamelCase_: Optional[int] = Trie()
self.assertEqual(trie.split('[CLS] This is a extra_id_100' ) , ['[CLS] This is a extra_id_100'] )
trie.add('[CLS]' )
trie.add('extra_id_1' )
trie.add('extra_id_100' )
self.assertEqual(trie.split('[CLS] This is a extra_id_100' ) , ['[CLS]', ' This is a ', 'extra_id_100'] )
def _a ( self ):
UpperCamelCase_: int = Trie()
trie.add('A' )
self.assertEqual(trie.split('ABC' ) , ['A', 'BC'] )
self.assertEqual(trie.split('BCA' ) , ['BC', 'A'] )
def _a ( self ):
UpperCamelCase_: str = Trie()
trie.add('TOKEN]' )
trie.add('[SPECIAL_TOKEN]' )
self.assertEqual(trie.split('This is something [SPECIAL_TOKEN]' ) , ['This is something ', '[SPECIAL_TOKEN]'] )
def _a ( self ):
UpperCamelCase_: Union[str, Any] = Trie()
trie.add('A' )
trie.add('P' )
trie.add('[SPECIAL_TOKEN]' )
self.assertEqual(trie.split('This is something [SPECIAL_TOKEN]' ) , ['This is something ', '[SPECIAL_TOKEN]'] )
def _a ( self ):
UpperCamelCase_: List[str] = Trie()
trie.add('AB' )
trie.add('B' )
trie.add('C' )
self.assertEqual(trie.split('ABC' ) , ['AB', 'C'] )
def _a ( self ):
UpperCamelCase_: List[str] = Trie()
trie.add('ABC' )
trie.add('B' )
trie.add('CD' )
self.assertEqual(trie.split('ABCD' ) , ['ABC', 'D'] )
def _a ( self ):
# Even if the offsets are wrong, we necessarily output correct string
# parts.
UpperCamelCase_: Union[str, Any] = Trie()
        parts = trie.cut_text('ABC' , [0, 0, 2, 1, 2, 3] )
        self.assertEqual(parts , ['AB', 'C'] )
| 292 | 0 |
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
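# Tests for datasets' cached_path() and the low-level http/ftp/fsspec helpers:
# decompression of gzip/xz/zstd files, placement of extracted archives, local,
# relative and missing paths, and the OfflineModeIsEnabled guard that fires
# when HF_DATASETS_OFFLINE is set.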
FILE_CONTENT = """\
    Text data.
    Second line of data."""
FILE_PATH = 'file'
@pytest.fixture(scope='''session''' )
def zstd_path( tmp_path_factory ):
    path = tmp_path_factory.mktemp('''data''' ) / (FILE_PATH + '''.zstd''')
    data = bytes(FILE_CONTENT , '''utf-8''' )
    with zstd.open(path , '''wb''' ) as f:
        f.write(data )
    return path
@pytest.fixture
def tmpfs_file( tmpfs ):
    with open(os.path.join(tmpfs.local_root_dir , FILE_PATH ) , '''w''' ) as f:
        f.write(FILE_CONTENT )
    return FILE_PATH
@pytest.mark.parametrize('''compression_format''' , ['''gzip''', '''xz''', '''zstd'''] )
def test_cached_path_extract( compression_format , gz_file , xz_file , zstd_path , tmp_path , text_file ):
    input_paths = {'''gzip''': gz_file, '''xz''': xz_file, '''zstd''': zstd_path}
    input_path = input_paths[compression_format]
    cache_dir = tmp_path / '''cache'''
    download_config = DownloadConfig(cache_dir=cache_dir , extract_compressed_file=True )
    extracted_path = cached_path(input_path , download_config=download_config )
    with open(extracted_path ) as f:
        extracted_file_content = f.read()
    with open(text_file ) as f:
        expected_file_content = f.read()
    assert extracted_file_content == expected_file_content
@pytest.mark.parametrize('''default_extracted''' , [True, False] )
@pytest.mark.parametrize('''default_cache_dir''' , [True, False] )
def test_extracted_datasets_path( default_extracted , default_cache_dir , xz_file , tmp_path , monkeypatch ):
    custom_cache_dir = '''custom_cache'''
    custom_extracted_dir = '''custom_extracted_dir'''
    custom_extracted_path = tmp_path / '''custom_extracted_path'''
    if default_extracted:
        expected = ('''downloads''' if default_cache_dir else custom_cache_dir, '''extracted''')
    else:
        monkeypatch.setattr('''datasets.config.EXTRACTED_DATASETS_DIR''' , custom_extracted_dir )
        monkeypatch.setattr('''datasets.config.EXTRACTED_DATASETS_PATH''' , str(custom_extracted_path ) )
        expected = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)
    filename = xz_file
    download_config = (
        DownloadConfig(extract_compressed_file=True )
        if default_cache_dir
        else DownloadConfig(cache_dir=tmp_path / custom_cache_dir , extract_compressed_file=True )
    )
    extracted_file_path = cached_path(filename , download_config=download_config )
    assert Path(extracted_file_path ).parent.parts[-2:] == expected
def test_cached_path_local( text_file ):
    # absolute path
    text_file_abs = str(Path(text_file ).resolve() )
    assert cached_path(text_file_abs ) == text_file
    # relative path
    text_file_rel = str(Path(text_file ).resolve().relative_to(Path(os.getcwd() ) ) )
    assert cached_path(text_file_rel ) == text_file
def test_cached_path_missing_local( tmp_path ):
    # absolute path
    missing_file = str(tmp_path.resolve() / '''__missing_file__.txt''' )
    with pytest.raises(FileNotFoundError ):
        cached_path(missing_file )
    # relative path
    missing_file = '''./__missing_file__.txt'''
    with pytest.raises(FileNotFoundError ):
        cached_path(missing_file )
def test_get_from_cache_fsspec( tmpfs_file ):
    output_path = get_from_cache(F'''tmp://{tmpfs_file}''' )
    with open(output_path ) as f:
        output_file_content = f.read()
    assert output_file_content == FILE_CONTENT
@patch('''datasets.config.HF_DATASETS_OFFLINE''' , True )
def test_cached_path_offline():
    with pytest.raises(OfflineModeIsEnabled ):
        cached_path('''https://huggingface.co''' )
@patch('''datasets.config.HF_DATASETS_OFFLINE''' , True )
def test_http_offline( tmp_path_factory ):
    filename = tmp_path_factory.mktemp('''data''' ) / '''file.html'''
    with pytest.raises(OfflineModeIsEnabled ):
        http_get('''https://huggingface.co''' , temp_file=filename )
    with pytest.raises(OfflineModeIsEnabled ):
        http_head('''https://huggingface.co''' )
@patch('''datasets.config.HF_DATASETS_OFFLINE''' , True )
def test_ftp_offline( tmp_path_factory ):
    filename = tmp_path_factory.mktemp('''data''' ) / '''file.html'''
    with pytest.raises(OfflineModeIsEnabled ):
        ftp_get('''ftp://huggingface.co''' , temp_file=filename )
    with pytest.raises(OfflineModeIsEnabled ):
        ftp_head('''ftp://huggingface.co''' )
@patch('''datasets.config.HF_DATASETS_OFFLINE''' , True )
def test_fsspec_offline( tmp_path_factory ):
    filename = tmp_path_factory.mktemp('''data''' ) / '''file.html'''
    with pytest.raises(OfflineModeIsEnabled ):
        fsspec_get('''s3://huggingface.co''' , temp_file=filename )
    with pytest.raises(OfflineModeIsEnabled ):
        fsspec_head('''s3://huggingface.co''' )
| 9 |
import argparse
import torch
from transformers import GPTaConfig, GPTaModel, load_tf_weights_in_gpta
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
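# Converts a TensorFlow GPT-2 checkpoint into PyTorch format: the weights are
# loaded into a freshly built model and written out as a state dict plus a
# config file in the requested dump folder.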
def convert_gpta_checkpoint_to_pytorch( gpta_checkpoint_path , gpta_config_file , pytorch_dump_folder_path ):
    '''simple docstring'''
    if gpta_config_file == "":
        config = GPTaConfig()
    else:
        config = GPTaConfig.from_json_file(gpta_config_file )
    model = GPTaModel(config )
    # Load weights from numpy
    load_tf_weights_in_gpta(model , config , gpta_checkpoint_path )
    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + """/""" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + """/""" + CONFIG_NAME
    print(f'Save PyTorch model to {pytorch_weights_dump_path}' )
    torch.save(model.state_dict() , pytorch_weights_dump_path )
    print(f'Save configuration file to {pytorch_config_dump_path}' )
    with open(pytorch_config_dump_path , """w""" , encoding="""utf-8""" ) as f:
        f.write(config.to_json_string() )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--gpt2_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--gpt2_config_file",
default="",
type=str,
help=(
"An optional config json file corresponding to the pre-trained OpenAI model. \n"
"This specifies the model architecture."
),
)
    args = parser.parse_args()
convert_gpta_checkpoint_to_pytorch(args.gpta_checkpoint_path, args.gpta_config_file, args.pytorch_dump_folder_path)
| 252 | 0 |
import numpy as np
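# Power iteration: repeatedly multiply a candidate vector by the matrix and
# renormalize. The vector converges to the eigenvector of the largest-magnitude
# eigenvalue, and the Rayleigh quotient v^H A v of the normalized vector gives
# the eigenvalue estimate. The implementation below requires a symmetric
# (or, in the complex case, Hermitian) matrix.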
def power_iteration( input_matrix , vector , error_tol = 1e-12 , max_iterations = 100 , ) -> tuple[float, np.ndarray]:
    assert np.shape(input_matrix )[0] == np.shape(input_matrix )[1]
    # Ensure proper dimensionality.
    assert np.shape(input_matrix )[0] == np.shape(vector )[0]
    # Ensure inputs are either both complex or both real
    assert np.iscomplexobj(input_matrix ) == np.iscomplexobj(vector )
    is_complex = np.iscomplexobj(input_matrix )
    if is_complex:
        # Ensure complex input_matrix is Hermitian
        assert np.array_equal(input_matrix , input_matrix.conj().T )
    # Set convergence to False. Will define convergence when we exceed max_iterations
    # or when we have small changes from one iteration to next.
    convergence = False
    lambda_previous = 0
    iterations = 0
    error = 1e12
    while not convergence:
        # Multiple matrix by the vector.
        w = np.dot(input_matrix , vector )
        # Normalize the resulting output vector.
        vector = w / np.linalg.norm(w )
        # Find rayleigh quotient
        # (faster than usual b/c we know vector is normalized already)
        vector_h = vector.conj().T if is_complex else vector.T
        lambda_ = np.dot(vector_h , np.dot(input_matrix , vector ) )
        # Check convergence.
        error = np.abs(lambda_ - lambda_previous ) / lambda_
        iterations += 1
        if error <= error_tol or iterations >= max_iterations:
            convergence = True
        lambda_previous = lambda_
    if is_complex:
        lambda_ = np.real(lambda_ )
    return lambda_, vector
def test_power_iteration() -> None:
    real_input_matrix = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]] )
    real_vector = np.array([41, 4, 20] )
    complex_input_matrix = real_input_matrix.astype(np.complex128 )
    imag_matrix = np.triu(1j * complex_input_matrix , 1 )
    complex_input_matrix += imag_matrix
    complex_input_matrix += -1 * imag_matrix.T
    complex_vector = np.array([41, 4, 20] ).astype(np.complex128 )
    for problem_type in ["real", "complex"]:
        if problem_type == "real":
            input_matrix = real_input_matrix
            vector = real_vector
        elif problem_type == "complex":
            input_matrix = complex_input_matrix
            vector = complex_vector
        # Our implementation.
        eigen_value , eigen_vector = power_iteration(input_matrix , vector )
        # Numpy implementation.
        # Get eigenvalues and eigenvectors using built-in numpy
        # eigh (eigh used for symmetric or hermetian matrices).
        eigen_values , eigen_vectors = np.linalg.eigh(input_matrix )
        # Last eigenvalue is the maximum one.
        eigen_value_max = eigen_values[-1]
        # Last column in this matrix is eigenvector corresponding to largest eigenvalue.
        eigen_vector_max = eigen_vectors[:, -1]
        # Check our implementation and numpy gives close answers.
        assert np.abs(eigen_value - eigen_value_max ) <= 1e-6
        # Take absolute values element wise of each eigenvector.
        # as they are only unique to a minus sign.
        assert np.linalg.norm(np.abs(eigen_vector ) - np.abs(eigen_vector_max ) ) <= 1e-6
if __name__ == "__main__":
import doctest
doctest.testmod()
test_power_iteration()
| 353 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, XLMRobertaTokenizer
from diffusers import AltDiffusionPipeline, AutoencoderKL, DDIMScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class AltDiffusionPipelineFastTests( PipelineKarrasSchedulerTesterMixin , PipelineLatentTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
    pipeline_class = AltDiffusionPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components( self ):
"""simple docstring"""
torch.manual_seed(0 )
__UpperCamelCase : Union[str, Any] =UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
__UpperCamelCase : Tuple =DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=lowerCamelCase__ , set_alpha_to_one=lowerCamelCase__ , )
torch.manual_seed(0 )
__UpperCamelCase : Dict =AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
# TODO: address the non-deterministic text encoder (fails for save-load tests)
# torch.manual_seed(0)
# text_encoder_config = RobertaSeriesConfig(
# hidden_size=32,
# project_dim=32,
# intermediate_size=37,
# layer_norm_eps=1e-05,
# num_attention_heads=4,
# num_hidden_layers=5,
# vocab_size=5002,
# )
# text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
torch.manual_seed(0 )
__UpperCamelCase : Union[str, Any] =CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5002 , )
__UpperCamelCase : Optional[int] =CLIPTextModel(lowerCamelCase__ )
__UpperCamelCase : int =XLMRobertaTokenizer.from_pretrained('hf-internal-testing/tiny-xlm-roberta' )
__UpperCamelCase : Dict =77
__UpperCamelCase : List[str] ={
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def __lowercase ( self , lowerCamelCase__ , lowerCamelCase__=0 ):
"""simple docstring"""
if str(lowerCamelCase__ ).startswith('mps' ):
__UpperCamelCase : Optional[Any] =torch.manual_seed(lowerCamelCase__ )
else:
__UpperCamelCase : List[Any] =torch.Generator(device=lowerCamelCase__ ).manual_seed(lowerCamelCase__ )
__UpperCamelCase : Tuple ={
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def __lowercase ( self ):
"""simple docstring"""
super().test_attention_slicing_forward_pass(expected_max_diff=3E-3 )
def __lowercase ( self ):
"""simple docstring"""
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
def __lowercase ( self ):
"""simple docstring"""
__UpperCamelCase : Optional[Any] ='cpu' # ensure determinism for the device-dependent torch.Generator
__UpperCamelCase : List[str] =self.get_dummy_components()
torch.manual_seed(0 )
__UpperCamelCase : List[str] =RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=5002 , )
# TODO: remove after fixing the non-deterministic text encoder
__UpperCamelCase : List[str] =RobertaSeriesModelWithTransformation(lowerCamelCase__ )
__UpperCamelCase : Any =text_encoder
__UpperCamelCase : int =AltDiffusionPipeline(**lowerCamelCase__ )
__UpperCamelCase : Any =alt_pipe.to(lowerCamelCase__ )
alt_pipe.set_progress_bar_config(disable=lowerCamelCase__ )
__UpperCamelCase : Optional[Any] =self.get_dummy_inputs(lowerCamelCase__ )
__UpperCamelCase : Tuple ='A photo of an astronaut'
__UpperCamelCase : str =alt_pipe(**lowerCamelCase__ )
__UpperCamelCase : Any =output.images
__UpperCamelCase : Any =image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
__UpperCamelCase : Dict =np.array(
[0.5_748_162, 0.60_447_145, 0.48_821_217, 0.50_100_636, 0.5_431_185, 0.45_763_683, 0.49_657_696, 0.48_132_733, 0.47_573_093] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def __lowercase ( self ):
"""simple docstring"""
__UpperCamelCase : Optional[int] ='cpu' # ensure determinism for the device-dependent torch.Generator
__UpperCamelCase : Optional[int] =self.get_dummy_components()
__UpperCamelCase : int =PNDMScheduler(skip_prk_steps=lowerCamelCase__ )
torch.manual_seed(0 )
__UpperCamelCase : Optional[Any] =RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=5002 , )
# TODO: remove after fixing the non-deterministic text encoder
__UpperCamelCase : Tuple =RobertaSeriesModelWithTransformation(lowerCamelCase__ )
__UpperCamelCase : List[str] =text_encoder
__UpperCamelCase : List[str] =AltDiffusionPipeline(**lowerCamelCase__ )
__UpperCamelCase : Dict =alt_pipe.to(lowerCamelCase__ )
alt_pipe.set_progress_bar_config(disable=lowerCamelCase__ )
__UpperCamelCase : Dict =self.get_dummy_inputs(lowerCamelCase__ )
__UpperCamelCase : Optional[Any] =alt_pipe(**lowerCamelCase__ )
__UpperCamelCase : str =output.images
__UpperCamelCase : Optional[int] =image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
__UpperCamelCase : Dict =np.array(
[0.51_605_093, 0.5_707_241, 0.47_365_507, 0.50_578_886, 0.5_633_877, 0.4_642_503, 0.5_182_081, 0.48_763_484, 0.49_084_237] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class __A ( unittest.TestCase ):
"""simple docstring"""
def __lowercase ( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowercase ( self ):
"""simple docstring"""
__UpperCamelCase : Dict =AltDiffusionPipeline.from_pretrained('BAAI/AltDiffusion' , safety_checker=lowerCamelCase__ )
__UpperCamelCase : List[str] =alt_pipe.to(lowerCamelCase__ )
alt_pipe.set_progress_bar_config(disable=lowerCamelCase__ )
__UpperCamelCase : Any ='A painting of a squirrel eating a burger'
__UpperCamelCase : Optional[int] =torch.manual_seed(0 )
__UpperCamelCase : Dict =alt_pipe([prompt] , generator=lowerCamelCase__ , guidance_scale=6.0 , num_inference_steps=20 , output_type='np' )
__UpperCamelCase : List[str] =output.images
__UpperCamelCase : int =image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
__UpperCamelCase : Optional[int] =np.array([0.1_010, 0.0_800, 0.0_794, 0.0_885, 0.0_843, 0.0_762, 0.0_769, 0.0_729, 0.0_586] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def __lowercase ( self ):
"""simple docstring"""
__UpperCamelCase : List[str] =DDIMScheduler.from_pretrained('BAAI/AltDiffusion' , subfolder='scheduler' )
__UpperCamelCase : Tuple =AltDiffusionPipeline.from_pretrained('BAAI/AltDiffusion' , scheduler=lowerCamelCase__ , safety_checker=lowerCamelCase__ )
__UpperCamelCase : Any =alt_pipe.to(lowerCamelCase__ )
alt_pipe.set_progress_bar_config(disable=lowerCamelCase__ )
__UpperCamelCase : Optional[Any] ='A painting of a squirrel eating a burger'
__UpperCamelCase : Optional[int] =torch.manual_seed(0 )
__UpperCamelCase : Tuple =alt_pipe([prompt] , generator=lowerCamelCase__ , num_inference_steps=2 , output_type='numpy' )
__UpperCamelCase : List[str] =output.images
__UpperCamelCase : Optional[Any] =image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
__UpperCamelCase : List[str] =np.array([0.4_019, 0.4_052, 0.3_810, 0.4_119, 0.3_916, 0.3_982, 0.4_651, 0.4_195, 0.5_323] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
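# The assertions above follow a common diffusers testing idiom: generate a tiny
# image with a fixed seed, then compare the bottom-right 3x3 patch of the last
# channel against hard-coded reference values. A standalone sketch of that
# check (the 1e-2 tolerance mirrors the tests above; names are illustrative):
import numpy as np

def assert_image_slice_close(image: np.ndarray, expected_slice: np.ndarray, atol: float = 1e-2) -> None:
    # image has shape (batch, height, width, channels); image[0, -3:, -3:, -1]
    # is the bottom-right 3x3 corner of the last channel of the first image.
    image_slice = image[0, -3:, -3:, -1]
    assert np.abs(image_slice.flatten() - expected_slice).max() < atol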
| 245 | 0 |
'''simple docstring'''
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
lowerCamelCase_ = '''\
@inproceedings{pillutla-etal:mauve:neurips2021,
title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},
author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},
booktitle = {NeurIPS},
year = {2021}
}
'''
lowerCamelCase_ = '''\
MAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.
MAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.
For details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (NeurIPS 2021).
This metric is a wrapper around the official implementation of MAUVE:
https://github.com/krishnap25/mauve
'''
lowerCamelCase_ = '''
Calculates MAUVE scores between two lists of generated text and reference text.
Args:
    predictions: list of generated text to score. Each prediction
        should be a string with tokens separated by spaces.
    references: list of references, one per prediction. Each
        reference should be a string with tokens separated by spaces.
Optional Args:
num_buckets: the size of the histogram to quantize P and Q. Options: \'auto\' (default) or an integer
    pca_max_data: the number of data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1
kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9
kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5
kmeans_max_iter: maximum number of k-means iterations. Default 500
    featurize_model_name: name of the model from which features are obtained. Default \'gpt2-large\'. Use one of [\'gpt2\', \'gpt2-medium\', \'gpt2-large\', \'gpt2-xl\'].
    device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use the GPU; if no GPU with this id is found, the CPU is used.
max_text_length: maximum number of tokens to consider. Default 1024
divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25
mauve_scaling_factor: "c" from the paper. Default 5.
verbose: If True (default), print running time updates
seed: random seed to initialize k-means cluster assignments.
Returns:
mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,
frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,
divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,
p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,
q_hist: same as above, but with q_text.
Examples:
>>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest
>>> import datasets
>>> mauve = datasets.load_metric(\'mauve\')
>>> predictions = ["hello there", "general kenobi"]
>>> references = ["hello there", "general kenobi"]
>>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP
>>> print(out.mauve) # doctest: +SKIP
1.0
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _UpperCAmelCase ( datasets.Metric ):
"""simple docstring"""
def lowerCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="https://github.com/krishnap25/mauve" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Value("string" , id="sequence" ),
} ) , codebase_urls=["https://github.com/krishnap25/mauve"] , reference_urls=[
"https://arxiv.org/abs/2102.01454",
"https://github.com/krishnap25/mauve",
] , )
def lowerCAmelCase ( self : Optional[int] , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Dict , __UpperCAmelCase : List[str]=None , __UpperCAmelCase : List[str]=None , __UpperCAmelCase : Union[str, Any]=None , __UpperCAmelCase : Tuple=None , __UpperCAmelCase : List[str]="auto" , __UpperCAmelCase : List[str]=-1 , __UpperCAmelCase : Optional[int]=0.9 , __UpperCAmelCase : Dict=5 , __UpperCAmelCase : List[Any]=500 , __UpperCAmelCase : List[str]="gpt2-large" , __UpperCAmelCase : str=-1 , __UpperCAmelCase : Dict=1024 , __UpperCAmelCase : Optional[Any]=25 , __UpperCAmelCase : Tuple=5 , __UpperCAmelCase : Optional[Any]=True , __UpperCAmelCase : List[Any]=25 , ):
'''simple docstring'''
_A = compute_mauve(
p_text=__UpperCAmelCase , q_text=__UpperCAmelCase , p_features=__UpperCAmelCase , q_features=__UpperCAmelCase , p_tokens=__UpperCAmelCase , q_tokens=__UpperCAmelCase , num_buckets=__UpperCAmelCase , pca_max_data=__UpperCAmelCase , kmeans_explained_var=__UpperCAmelCase , kmeans_num_redo=__UpperCAmelCase , kmeans_max_iter=__UpperCAmelCase , featurize_model_name=__UpperCAmelCase , device_id=__UpperCAmelCase , max_text_length=__UpperCAmelCase , divergence_curve_discretization_size=__UpperCAmelCase , mauve_scaling_factor=__UpperCAmelCase , verbose=__UpperCAmelCase , seed=__UpperCAmelCase , )
return out
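# A minimal usage sketch for this metric. The inputs below are hypothetical;
# computing it requires the `mauve-text` package and downloads a GPT-2
# featurizer on first use.
if __name__ == "__main__":
    mauve = datasets.load_metric("mauve")
    out = mauve.compute(
        predictions=["the cat sat on the mat"],
        references=["a cat was sitting on the mat"],
        featurize_model_name="gpt2",
    )
    print(out.mauve)  # in (0, 1]; larger means the two text distributions are closer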
| 79 |
'''simple docstring'''
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPTaLMHeadModel
lowerCamelCase_ = logging.getLogger(__name__)
def __lowercase ( __lowercase , __lowercase ) -> Optional[int]:
'''simple docstring'''
if os.path.exists(__lowercase ):
if os.path.exists(os.path.join(__lowercase , "config.json" ) ) and os.path.isfile(
os.path.join(__lowercase , "config.json" ) ):
os.remove(os.path.join(__lowercase , "config.json" ) )
if os.path.exists(os.path.join(__lowercase , "pytorch_model.bin" ) ) and os.path.isfile(
os.path.join(__lowercase , "pytorch_model.bin" ) ):
os.remove(os.path.join(__lowercase , "pytorch_model.bin" ) )
else:
os.makedirs(__lowercase )
model.save_pretrained(__lowercase )
def __lowercase ( __lowercase , __lowercase=False ) -> Optional[int]:
'''simple docstring'''
_A = 2
if unlogit:
_A = torch.pow(__lowercase , __lowercase )
_A = p * torch.log(__lowercase )
_A = 0
return -plogp.sum(dim=-1 )
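# Note on the entropy function above: for a distribution p over attention
# positions it computes -sum_j p_j * log(p_j) along the last axis; with
# unlogit=True the input is squared first, and the zero assignment guards the
# p == 0 case so that 0 * log(0) does not produce NaNs.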
def __lowercase ( __lowercase ) -> Optional[Any]:
'''simple docstring'''
logger.info("lv, h >\t" + "\t".join(F'''{x + 1}''' for x in range(len(__lowercase ) ) ) )
for row in range(len(__lowercase ) ):
if tensor.dtype != torch.long:
logger.info(F'''layer {row + 1}:\t''' + "\t".join(F'''{x:.5f}''' for x in tensor[row].cpu().data ) )
else:
logger.info(F'''layer {row + 1}:\t''' + "\t".join(F'''{x:d}''' for x in tensor[row].cpu().data ) )
def __lowercase ( __lowercase , __lowercase , __lowercase , __lowercase=True , __lowercase=True , __lowercase=None , __lowercase=False ) -> int:
'''simple docstring'''
_A , _A = model.config.num_hidden_layers, model.config.num_attention_heads
_A = torch.zeros(__lowercase , __lowercase ).to(args.device )
_A = torch.zeros(__lowercase , __lowercase ).to(args.device )
if head_mask is None:
_A = torch.ones(__lowercase , __lowercase ).to(args.device )
head_mask.requires_grad_(requires_grad=__lowercase )
# If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
if actually_pruned:
_A = None
_A = 0.0
_A = 0.0
for step, inputs in enumerate(tqdm(__lowercase , desc="Iteration" , disable=args.local_rank not in [-1, 0] ) ):
_A = tuple(t.to(args.device ) for t in inputs )
((_A) , ) = inputs
# Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
_A = model(__lowercase , labels=__lowercase , head_mask=__lowercase )
# (loss), lm_logits, presents, (all hidden_states), (attentions)
_A , _A , _A = (
outputs[0],
outputs[1],
outputs[-1],
) # Loss and logits are the first, attention the last
loss.backward() # Backpropagate to populate the gradients in the head mask
total_loss += loss.detach().cpu().numpy()
if compute_entropy:
for layer, attn in enumerate(__lowercase ):
_A = entropy(attn.detach() , __lowercase )
attn_entropy[layer] += masked_entropy.sum(-1 ).sum(0 ).sum(0 ).detach()
if compute_importance:
head_importance += head_mask.grad.abs().detach()
tot_tokens += torch.ones_like(__lowercase ).float().detach().sum().data
# Normalize
attn_entropy /= tot_tokens
head_importance /= tot_tokens
# Layerwise importance normalization
if not args.dont_normalize_importance_by_layer:
_A = 2
_A = torch.pow(torch.pow(__lowercase , __lowercase ).sum(-1 ) , 1 / exponent )
head_importance /= norm_by_layer.unsqueeze(-1 ) + 1e-20
if not args.dont_normalize_global_importance:
_A = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())
# Print matrices
if compute_entropy:
logger.info("Attention entropies" )
print_ad_tensor(__lowercase )
if compute_importance:
logger.info("Head importance scores" )
print_ad_tensor(__lowercase )
logger.info("Head ranked by importance scores" )
_A = torch.zeros(head_importance.numel() , dtype=torch.long , device=args.device )
_A = torch.arange(
head_importance.numel() , device=args.device )
_A = head_ranks.view_as(__lowercase )
print_ad_tensor(__lowercase )
return attn_entropy, head_importance, total_loss
def __lowercase ( __lowercase , __lowercase , __lowercase ) -> List[str]:
'''simple docstring'''
_A , _A , _A = compute_heads_importance(__lowercase , __lowercase , __lowercase , compute_entropy=__lowercase )
    _A = 1 / loss # use the inverse LM loss in place of a downstream score
logger.info("Pruning: original score: %f, threshold: %f" , __lowercase , original_score * args.masking_threshold )
_A = torch.ones_like(__lowercase )
_A = max(1 , int(new_head_mask.numel() * args.masking_amount ) )
_A = original_score
while current_score >= original_score * args.masking_threshold:
_A = new_head_mask.clone().detach() # save current head mask
# heads from least important to most - keep only not-masked heads
_A = float("Inf" )
_A = head_importance.view(-1 ).sort()[1]
if len(__lowercase ) <= num_to_mask:
print("BREAK BY num_to_mask" )
break
# mask heads
_A = current_heads_to_mask[:num_to_mask]
logger.info("Heads to mask: %s" , str(current_heads_to_mask.tolist() ) )
_A = new_head_mask.view(-1 )
_A = 0.0
_A = new_head_mask.view_as(__lowercase )
_A = new_head_mask.clone().detach()
print_ad_tensor(__lowercase )
# Compute metric and head importance again
_A , _A , _A = compute_heads_importance(
__lowercase , __lowercase , __lowercase , compute_entropy=__lowercase , head_mask=__lowercase )
_A = 1 / loss
logger.info(
"Masking: current score: %f, remaining heads %d (%.1f percents)" , __lowercase , new_head_mask.sum() , new_head_mask.sum() / new_head_mask.numel() * 100 , )
logger.info("Final head mask" )
print_ad_tensor(__lowercase )
np.save(os.path.join(args.output_dir , "head_mask.npy" ) , head_mask.detach().cpu().numpy() )
return head_mask
def __lowercase ( __lowercase , __lowercase , __lowercase , __lowercase ) -> List[str]:
'''simple docstring'''
_A = datetime.now()
_A , _A , _A = compute_heads_importance(
__lowercase , __lowercase , __lowercase , compute_entropy=__lowercase , compute_importance=__lowercase , head_mask=__lowercase )
_A = 1 / loss
_A = datetime.now() - before_time
_A = sum(p.numel() for p in model.parameters() )
_A = {
layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(__lowercase ) )
}
for k, v in heads_to_prune.items():
if isinstance(__lowercase , __lowercase ):
_A = [
v,
]
assert sum(len(__lowercase ) for h in heads_to_prune.values() ) == (1 - head_mask.long()).sum().item()
model.prune_heads(__lowercase )
_A = sum(p.numel() for p in model.parameters() )
_A = datetime.now()
_A , _A , _A = compute_heads_importance(
__lowercase , __lowercase , __lowercase , compute_entropy=__lowercase , compute_importance=__lowercase , head_mask=__lowercase , actually_pruned=__lowercase , )
_A = 1 / loss
_A = datetime.now() - before_time
logger.info(
"Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)" , __lowercase , __lowercase , pruned_num_params / original_num_params * 100 , )
logger.info("Pruning: score with masking: %f score with pruning: %f" , __lowercase , __lowercase )
logger.info("Pruning: speed ratio (original timing / new timing): %f percents" , original_time / new_time * 100 )
save_model(__lowercase , args.output_dir )
def __lowercase ( ) -> Union[str, Any]:
'''simple docstring'''
_A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--data_dir" , default=__lowercase , type=__lowercase , required=__lowercase , help="The input data dir. Should contain the .tsv files (or other data files) for the task." , )
parser.add_argument(
"--model_name_or_path" , default=__lowercase , type=__lowercase , required=__lowercase , help="Path to pretrained model or model identifier from huggingface.co/models" , )
parser.add_argument(
"--output_dir" , default=__lowercase , type=__lowercase , required=__lowercase , help="The output directory where the model predictions and checkpoints will be written." , )
# Other parameters
parser.add_argument(
"--config_name" , default="" , type=__lowercase , help="Pretrained config name or path if not the same as model_name_or_path" , )
parser.add_argument(
"--tokenizer_name" , default="" , type=__lowercase , help="Pretrained tokenizer name or path if not the same as model_name_or_path" , )
parser.add_argument(
"--cache_dir" , default=__lowercase , type=__lowercase , help="Where do you want to store the pre-trained models downloaded from s3" , )
parser.add_argument(
"--data_subset" , type=__lowercase , default=-1 , help="If > 0: limit the data to a subset of data_subset instances." )
parser.add_argument(
"--overwrite_output_dir" , action="store_true" , help="Whether to overwrite data in output directory" )
parser.add_argument(
"--overwrite_cache" , action="store_true" , help="Overwrite the cached training and evaluation sets" )
parser.add_argument(
"--dont_normalize_importance_by_layer" , action="store_true" , help="Don't normalize importance score by layers" )
parser.add_argument(
"--dont_normalize_global_importance" , action="store_true" , help="Don't normalize all importance scores between 0 and 1" , )
parser.add_argument(
"--try_masking" , action="store_true" , help="Whether to try to mask head until a threshold of accuracy." )
parser.add_argument(
"--masking_threshold" , default=0.9 , type=__lowercase , help="masking threshold in term of metrics (stop masking when metric < threshold * original metric value)." , )
parser.add_argument(
"--masking_amount" , default=0.1 , type=__lowercase , help="Amount to heads to masking at each masking step." )
parser.add_argument("--metric_name" , default="acc" , type=__lowercase , help="Metric to use for head masking." )
parser.add_argument(
"--max_seq_length" , default=128 , type=__lowercase , help=(
"The maximum total input sequence length after WordPiece tokenization. \n"
"Sequences longer than this will be truncated, sequences shorter padded."
) , )
parser.add_argument("--batch_size" , default=1 , type=__lowercase , help="Batch size." )
parser.add_argument("--seed" , type=__lowercase , default=42 )
parser.add_argument("--local_rank" , type=__lowercase , default=-1 , help="local_rank for distributed training on gpus" )
parser.add_argument("--no_cuda" , action="store_true" , help="Whether not to use CUDA when available" )
parser.add_argument("--server_ip" , type=__lowercase , default="" , help="Can be used for distant debugging." )
parser.add_argument("--server_port" , type=__lowercase , default="" , help="Can be used for distant debugging." )
_A = parser.parse_args()
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print("Waiting for debugger attach" )
ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=__lowercase )
ptvsd.wait_for_attach()
# Setup devices and distributed training
if args.local_rank == -1 or args.no_cuda:
_A = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu" )
_A = 0 if args.no_cuda else torch.cuda.device_count()
else:
torch.cuda.set_device(args.local_rank )
_A = torch.device("cuda" , args.local_rank )
_A = 1
torch.distributed.init_process_group(backend="nccl" ) # Initializes the distributed backend
# Setup logging
logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN )
logger.info("device: {} n_gpu: {}, distributed: {}".format(args.device , args.n_gpu , bool(args.local_rank != -1 ) ) )
_A = GPTaLMHeadModel.from_pretrained(args.model_name_or_path )
# Distributed and parallel training
model.to(args.device )
if args.local_rank != -1:
_A = nn.parallel.DistributedDataParallel(
__lowercase , device_ids=[args.local_rank] , output_device=args.local_rank , find_unused_parameters=__lowercase )
elif args.n_gpu > 1:
_A = nn.DataParallel(__lowercase )
# Print/save training arguments
os.makedirs(args.output_dir , exist_ok=__lowercase )
torch.save(__lowercase , os.path.join(args.output_dir , "run_args.bin" ) )
logger.info("Training/evaluation parameters %s" , __lowercase )
# Prepare dataset
_A = np.concatenate(
[
np.loadtxt(args.data_dir , dtype=np.intaa ),
] )
_A = (torch.from_numpy(__lowercase ),)
_A = TensorDataset(*__lowercase )
_A = RandomSampler(__lowercase )
_A = DataLoader(__lowercase , sampler=__lowercase , batch_size=args.batch_size )
# Compute head entropy and importance score
compute_heads_importance(__lowercase , __lowercase , __lowercase )
    # Try head masking (set heads to zero until the score goes under a threshold)
# and head pruning (remove masked heads and see the effect on the network)
if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
_A = mask_heads(__lowercase , __lowercase , __lowercase )
prune_heads(__lowercase , __lowercase , __lowercase , __lowercase )
if __name__ == "__main__":
main()
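# A toy sketch of the selection step inside the masking loop above: rank heads
# by importance and zero out the `num_to_mask` least important heads that are
# still unmasked. Names here are illustrative, not part of the script.
def select_heads_to_mask(head_importance, head_mask, num_to_mask):
    flat_mask = head_mask.view(-1).clone()
    importance = head_importance.view(-1).clone()
    # Already-masked heads get infinite importance so they sort to the end.
    importance[flat_mask == 0.0] = float("inf")
    least_important = importance.sort()[1][:num_to_mask]
    flat_mask[least_important] = 0.0
    return flat_mask.view_as(head_mask)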
| 79 | 1 |
import argparse
import os
import re
import numpy as np
import PIL
import torch
from timm import create_model
from torch.optim.lr_scheduler import OneCycleLR
from torch.utils.data import DataLoader, Dataset
from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor
from accelerate import Accelerator
def _a ( SCREAMING_SNAKE_CASE : str ):
"""simple docstring"""
UpperCamelCase__ : Any = fname.split(os.path.sep )[-1]
return re.search(r'''^(.*)_\d+\.jpg$''' , _UpperCAmelCase ).groups()[0]
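# e.g. a hypothetical "images/Abyssinian_12.jpg" maps to the label "Abyssinian":
# the regex captures everything in the basename before the final "_<number>.jpg".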
class __magic_name__ ( SCREAMING_SNAKE_CASE__):
def __init__( self : str , lowerCamelCase__ : Dict , lowerCamelCase__ : List[Any]=None , lowerCamelCase__ : Optional[Any]=None ) -> Any:
'''simple docstring'''
UpperCamelCase__ : Dict = file_names
UpperCamelCase__ : str = image_transform
UpperCamelCase__ : Union[str, Any] = label_to_id
def __len__( self : Tuple ) -> str:
'''simple docstring'''
return len(self.file_names )
def __getitem__( self : str , lowerCamelCase__ : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
UpperCamelCase__ : Any = self.file_names[idx]
UpperCamelCase__ : int = PIL.Image.open(lowerCamelCase__ )
UpperCamelCase__ : int = raw_image.convert('''RGB''' )
if self.image_transform is not None:
UpperCamelCase__ : Union[str, Any] = self.image_transform(lowerCamelCase__ )
UpperCamelCase__ : List[Any] = extract_label(lowerCamelCase__ )
if self.label_to_id is not None:
UpperCamelCase__ : Optional[Any] = self.label_to_id[label]
return {"image": image, "label": label}
def _a ( SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Optional[Any] ):
"""simple docstring"""
if args.with_tracking:
UpperCamelCase__ : int = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , log_with='''all''' , project_dir=args.project_dir )
else:
UpperCamelCase__ : Dict = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
UpperCamelCase__ : Tuple = config['lr']
UpperCamelCase__ : int = int(config['''num_epochs'''] )
UpperCamelCase__ : Union[str, Any] = int(config['''seed'''] )
UpperCamelCase__ : Optional[int] = int(config['''batch_size'''] )
UpperCamelCase__ : Dict = config['image_size']
if not isinstance(_UpperCAmelCase , (list, tuple) ):
UpperCamelCase__ : Optional[int] = (image_size, image_size)
# Parse out whether we are saving every epoch or after a certain number of batches
if hasattr(args.checkpointing_steps , '''isdigit''' ):
if args.checkpointing_steps == "epoch":
UpperCamelCase__ : Optional[Any] = args.checkpointing_steps
elif args.checkpointing_steps.isdigit():
UpperCamelCase__ : List[Any] = int(args.checkpointing_steps )
else:
raise ValueError(
F"Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed." )
else:
UpperCamelCase__ : Union[str, Any] = None
# We need to initialize the trackers we use, and also store our configuration
if args.with_tracking:
UpperCamelCase__ : Dict = os.path.split(_UpperCAmelCase )[-1].split('''.''' )[0]
accelerator.init_trackers(_UpperCAmelCase , _UpperCAmelCase )
# Grab all the image filenames
UpperCamelCase__ : str = [os.path.join(args.data_dir , _UpperCAmelCase ) for fname in os.listdir(args.data_dir ) if fname.endswith('''.jpg''' )]
# Build the label correspondences
UpperCamelCase__ : Optional[int] = [extract_label(_UpperCAmelCase ) for fname in file_names]
UpperCamelCase__ : Dict = list(set(_UpperCAmelCase ) )
id_to_label.sort()
UpperCamelCase__ : Tuple = {lbl: i for i, lbl in enumerate(_UpperCAmelCase )}
# Set the seed before splitting the data.
np.random.seed(_UpperCAmelCase )
torch.manual_seed(_UpperCAmelCase )
torch.cuda.manual_seed_all(_UpperCAmelCase )
# Split our filenames between train and validation
UpperCamelCase__ : Optional[Any] = np.random.permutation(len(_UpperCAmelCase ) )
UpperCamelCase__ : int = int(0.8 * len(_UpperCAmelCase ) )
UpperCamelCase__ : str = random_perm[:cut]
UpperCamelCase__ : Optional[int] = random_perm[cut:]
# For training we use a simple RandomResizedCrop
UpperCamelCase__ : List[Any] = Compose([RandomResizedCrop(_UpperCAmelCase , scale=(0.5, 1.0) ), ToTensor()] )
UpperCamelCase__ : Any = PetsDataset(
[file_names[i] for i in train_split] , image_transform=_UpperCAmelCase , label_to_id=_UpperCAmelCase )
# For evaluation, we use a deterministic Resize
UpperCamelCase__ : Any = Compose([Resize(_UpperCAmelCase ), ToTensor()] )
UpperCamelCase__ : Union[str, Any] = PetsDataset([file_names[i] for i in eval_split] , image_transform=_UpperCAmelCase , label_to_id=_UpperCAmelCase )
# Instantiate dataloaders.
UpperCamelCase__ : List[Any] = DataLoader(_UpperCAmelCase , shuffle=_UpperCAmelCase , batch_size=_UpperCAmelCase , num_workers=4 )
UpperCamelCase__ : Optional[int] = DataLoader(_UpperCAmelCase , shuffle=_UpperCAmelCase , batch_size=_UpperCAmelCase , num_workers=4 )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
UpperCamelCase__ : Any = create_model('''resnet50d''' , pretrained=_UpperCAmelCase , num_classes=len(_UpperCAmelCase ) )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
UpperCamelCase__ : Dict = model.to(accelerator.device )
# Freezing the base model
for param in model.parameters():
UpperCamelCase__ : Dict = False
for param in model.get_classifier().parameters():
UpperCamelCase__ : Union[str, Any] = True
# We normalize the batches of images to be a bit faster.
UpperCamelCase__ : List[str] = torch.tensor(model.default_cfg['''mean'''] )[None, :, None, None].to(accelerator.device )
UpperCamelCase__ : Tuple = torch.tensor(model.default_cfg['''std'''] )[None, :, None, None].to(accelerator.device )
# Instantiate optimizer
UpperCamelCase__ : Dict = torch.optim.Adam(params=model.parameters() , lr=lr / 25 )
# Instantiate learning rate scheduler
UpperCamelCase__ : Any = OneCycleLR(optimizer=_UpperCAmelCase , max_lr=_UpperCAmelCase , epochs=_UpperCAmelCase , steps_per_epoch=len(_UpperCAmelCase ) )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
UpperCamelCase__ : Optional[int] = accelerator.prepare(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# We need to keep track of how many total steps we have iterated over
UpperCamelCase__ : Optional[Any] = 0
# We also need to keep track of the starting epoch so files are named properly
UpperCamelCase__ : int = 0
# Potentially load in the weights and states from a previous save
if args.resume_from_checkpoint:
if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "":
accelerator.print(F"Resumed from checkpoint: {args.resume_from_checkpoint}" )
accelerator.load_state(args.resume_from_checkpoint )
UpperCamelCase__ : Optional[int] = os.path.basename(args.resume_from_checkpoint )
else:
# Get the most recent checkpoint
UpperCamelCase__ : Tuple = [f.name for f in os.scandir(os.getcwd() ) if f.is_dir()]
dirs.sort(key=os.path.getctime )
UpperCamelCase__ : Dict = dirs[-1] # Sorts folders by date modified, most recent checkpoint is the last
# Extract `epoch_{i}` or `step_{i}`
UpperCamelCase__ : Optional[Any] = os.path.splitext(_UpperCAmelCase )[0]
if "epoch" in training_difference:
UpperCamelCase__ : Optional[int] = int(training_difference.replace('''epoch_''' , '''''' ) ) + 1
UpperCamelCase__ : Tuple = None
else:
UpperCamelCase__ : str = int(training_difference.replace('''step_''' , '''''' ) )
UpperCamelCase__ : Optional[int] = resume_step // len(_UpperCAmelCase )
resume_step -= starting_epoch * len(_UpperCAmelCase )
# Now we train the model
for epoch in range(_UpperCAmelCase , _UpperCAmelCase ):
model.train()
if args.with_tracking:
UpperCamelCase__ : List[str] = 0
if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None:
# We need to skip steps until we reach the resumed step
UpperCamelCase__ : Union[str, Any] = accelerator.skip_first_batches(_UpperCAmelCase , _UpperCAmelCase )
overall_step += resume_step
else:
# After the first iteration though, we need to go back to the original dataloader
UpperCamelCase__ : Tuple = train_dataloader
for batch in active_dataloader:
# We could avoid this line since we set the accelerator with `device_placement=True`.
UpperCamelCase__ : Dict = {k: v.to(accelerator.device ) for k, v in batch.items()}
UpperCamelCase__ : int = (batch['image'] - mean) / std
UpperCamelCase__ : str = model(_UpperCAmelCase )
UpperCamelCase__ : Dict = torch.nn.functional.cross_entropy(_UpperCAmelCase , batch['''label'''] )
# We keep track of the loss at each epoch
if args.with_tracking:
total_loss += loss.detach().float()
accelerator.backward(_UpperCAmelCase )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
UpperCamelCase__ : List[str] = F"step_{overall_step}"
if overall_step % checkpointing_steps == 0:
if args.output_dir is not None:
UpperCamelCase__ : Union[str, Any] = os.path.join(args.output_dir , _UpperCAmelCase )
accelerator.save_state(_UpperCAmelCase )
model.eval()
UpperCamelCase__ : Union[str, Any] = 0
UpperCamelCase__ : List[str] = 0
for step, batch in enumerate(_UpperCAmelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
UpperCamelCase__ : Dict = {k: v.to(accelerator.device ) for k, v in batch.items()}
UpperCamelCase__ : Dict = (batch['image'] - mean) / std
with torch.no_grad():
UpperCamelCase__ : int = model(_UpperCAmelCase )
UpperCamelCase__ : List[Any] = outputs.argmax(dim=-1 )
UpperCamelCase__ : Optional[Any] = accelerator.gather_for_metrics((predictions, batch['''label''']) )
UpperCamelCase__ : List[str] = predictions == references
num_elems += accurate_preds.shape[0]
accurate += accurate_preds.long().sum()
UpperCamelCase__ : Optional[int] = accurate.item() / num_elems
# Use accelerator.print to print only on the main process.
accelerator.print(F"epoch {epoch}: {100 * eval_metric:.2f}" )
if args.with_tracking:
accelerator.log(
{
'''accuracy''': 100 * eval_metric,
'''train_loss''': total_loss.item() / len(_UpperCAmelCase ),
'''epoch''': epoch,
} , step=_UpperCAmelCase , )
if checkpointing_steps == "epoch":
UpperCamelCase__ : int = F"epoch_{epoch}"
if args.output_dir is not None:
UpperCamelCase__ : Dict = os.path.join(args.output_dir , _UpperCAmelCase )
accelerator.save_state(_UpperCAmelCase )
if args.with_tracking:
accelerator.end_training()
def _a ( ):
"""simple docstring"""
UpperCamelCase__ : Any = argparse.ArgumentParser(description='''Simple example of training script.''' )
parser.add_argument('''--data_dir''' , required=_UpperCAmelCase , help='''The data folder on disk.''' )
parser.add_argument('''--fp16''' , action='''store_true''' , help='''If passed, will use FP16 training.''' )
parser.add_argument(
'''--mixed_precision''' , type=_UpperCAmelCase , default=_UpperCAmelCase , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose'''
        ''' between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10'''
        ''' and an Nvidia Ampere GPU.''' , )
parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' )
parser.add_argument(
'''--checkpointing_steps''' , type=_UpperCAmelCase , default=_UpperCAmelCase , help='''Whether the various states should be saved at the end of every n steps, or \'epoch\' for each epoch.''' , )
parser.add_argument(
'''--output_dir''' , type=_UpperCAmelCase , default='''.''' , help='''Optional save directory where all checkpoint folders will be stored. Default is the current working directory.''' , )
parser.add_argument(
'''--resume_from_checkpoint''' , type=_UpperCAmelCase , default=_UpperCAmelCase , help='''If the training should continue from a checkpoint folder.''' , )
parser.add_argument(
'''--with_tracking''' , action='''store_true''' , help='''Whether to load in all available experiment trackers from the environment and use them for logging.''' , )
parser.add_argument(
        '''--project_dir''' , type=_UpperCAmelCase , default='''logs''' , help='''Location where experiment tracking logs and relevant project information are stored''' , )
UpperCamelCase__ : Optional[Any] = parser.parse_args()
UpperCamelCase__ : Tuple = {'lr': 3E-2, 'num_epochs': 3, 'seed': 42, 'batch_size': 64, 'image_size': 224}
training_function(_UpperCAmelCase , _UpperCAmelCase )
if __name__ == "__main__":
main()
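# For readers new to `accelerate`, the loop above reduces to this pattern:
# build an Accelerator, pass model/optimizer/dataloader(s)/scheduler through
# prepare(), and call accelerator.backward(loss) in place of loss.backward().
# A stripped-down sketch with a toy model and random data (illustrative only):
def _accelerate_pattern_sketch():
    accelerator = Accelerator()
    model = torch.nn.Linear(8, 2)
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
    dataset = torch.utils.data.TensorDataset(torch.randn(32, 8), torch.randint(0, 2, (32,)))
    dataloader = DataLoader(dataset, batch_size=8)
    model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)
    for inputs, labels in dataloader:
        optimizer.zero_grad()
        loss = torch.nn.functional.cross_entropy(model(inputs), labels)
        accelerator.backward(loss)  # replaces loss.backward()
        optimizer.step()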
| 358 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCamelCase : Optional[int] = logging.get_logger(__name__)
def _a ( SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : Any=False , SCREAMING_SNAKE_CASE : Any=False ):
"""simple docstring"""
UpperCamelCase__ : str = '''backbone.''' if is_semantic else ''''''
UpperCamelCase__ : Union[str, Any] = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"{prefix}blocks.{i}.norm1.weight", F"beit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((F"{prefix}blocks.{i}.norm1.bias", F"beit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append(
(F"{prefix}blocks.{i}.attn.proj.weight", F"beit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append(
(F"{prefix}blocks.{i}.attn.proj.bias", F"beit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((F"{prefix}blocks.{i}.norm2.weight", F"beit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((F"{prefix}blocks.{i}.norm2.bias", F"beit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((F"{prefix}blocks.{i}.mlp.fc1.weight", F"beit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((F"{prefix}blocks.{i}.mlp.fc1.bias", F"beit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((F"{prefix}blocks.{i}.mlp.fc2.weight", F"beit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((F"{prefix}blocks.{i}.mlp.fc2.bias", F"beit.encoder.layer.{i}.output.dense.bias") )
# projection layer + position embeddings
rename_keys.extend(
[
(F"{prefix}cls_token", '''beit.embeddings.cls_token'''),
(F"{prefix}patch_embed.proj.weight", '''beit.embeddings.patch_embeddings.projection.weight'''),
(F"{prefix}patch_embed.proj.bias", '''beit.embeddings.patch_embeddings.projection.bias'''),
(F"{prefix}pos_embed", '''beit.embeddings.position_embeddings'''),
] )
if has_lm_head:
# mask token + layernorm
rename_keys.extend(
[
('''mask_token''', '''beit.embeddings.mask_token'''),
('''norm.weight''', '''layernorm.weight'''),
('''norm.bias''', '''layernorm.bias'''),
] )
else:
# layernorm + classification head
rename_keys.extend(
[
('''fc_norm.weight''', '''beit.pooler.layernorm.weight'''),
('''fc_norm.bias''', '''beit.pooler.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
] )
return rename_keys
def _a ( SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Any=False , SCREAMING_SNAKE_CASE : int=False ):
"""simple docstring"""
for i in range(config.num_hidden_layers ):
UpperCamelCase__ : Union[str, Any] = '''backbone.''' if is_semantic else ''''''
# queries, keys and values
UpperCamelCase__ : int = state_dict.pop(F"{prefix}blocks.{i}.attn.qkv.weight" )
UpperCamelCase__ : List[str] = state_dict.pop(F"{prefix}blocks.{i}.attn.q_bias" )
UpperCamelCase__ : Tuple = state_dict.pop(F"{prefix}blocks.{i}.attn.v_bias" )
UpperCamelCase__ : List[str] = in_proj_weight[
: config.hidden_size, :
]
UpperCamelCase__ : Optional[int] = q_bias
UpperCamelCase__ : List[Any] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
UpperCamelCase__ : Optional[Any] = in_proj_weight[
-config.hidden_size :, :
]
UpperCamelCase__ : Union[str, Any] = v_bias
# gamma_1 and gamma_2
# we call them lambda because otherwise they are renamed when using .from_pretrained
UpperCamelCase__ : List[Any] = state_dict.pop(F"{prefix}blocks.{i}.gamma_1" )
UpperCamelCase__ : List[str] = state_dict.pop(F"{prefix}blocks.{i}.gamma_2" )
UpperCamelCase__ : Any = gamma_a
UpperCamelCase__ : str = gamma_a
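# The fused qkv weight popped above has shape (3 * hidden_size, hidden_size);
# the three row slices taken are query ([:hidden]), key ([hidden:2*hidden]) and
# value ([-hidden:]). Only the query and value biases are read here - BEiT
# keeps the key bias fixed at zero.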
def _a ( SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : List[str] ):
"""simple docstring"""
UpperCamelCase__ : Optional[Any] = dct.pop(SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Union[str, Any] = val
def _a ( ):
"""simple docstring"""
UpperCamelCase__ : Any = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
UpperCamelCase__ : Optional[Any] = Image.open(requests.get(SCREAMING_SNAKE_CASE , stream=SCREAMING_SNAKE_CASE ).raw )
return im
@torch.no_grad()
def _a ( SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : Dict=False ):
"""simple docstring"""
UpperCamelCase__ : Optional[Any] = False if '''rvlcdip''' in checkpoint_url else True
UpperCamelCase__ : str = BeitConfig(use_absolute_position_embeddings=SCREAMING_SNAKE_CASE , use_mask_token=SCREAMING_SNAKE_CASE )
# size of the architecture
if "large" in checkpoint_url or "dit-l" in checkpoint_url:
UpperCamelCase__ : List[str] = 1024
UpperCamelCase__ : Union[str, Any] = 4096
UpperCamelCase__ : Optional[int] = 24
UpperCamelCase__ : List[str] = 16
# labels
if "rvlcdip" in checkpoint_url:
UpperCamelCase__ : Any = 16
UpperCamelCase__ : Optional[int] = '''huggingface/label-files'''
UpperCamelCase__ : Union[str, Any] = '''rvlcdip-id2label.json'''
UpperCamelCase__ : Dict = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , repo_type='''dataset''' ) , '''r''' ) )
UpperCamelCase__ : Optional[Any] = {int(SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()}
UpperCamelCase__ : int = idalabel
UpperCamelCase__ : Union[str, Any] = {v: k for k, v in idalabel.items()}
# load state_dict of original model, remove and rename some keys
UpperCamelCase__ : Optional[int] = torch.hub.load_state_dict_from_url(SCREAMING_SNAKE_CASE , map_location='''cpu''' )['''model''']
UpperCamelCase__ : str = create_rename_keys(SCREAMING_SNAKE_CASE , has_lm_head=SCREAMING_SNAKE_CASE )
for src, dest in rename_keys:
rename_key(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
read_in_q_k_v(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , has_lm_head=SCREAMING_SNAKE_CASE )
# load HuggingFace model
UpperCamelCase__ : Tuple = BeitForMaskedImageModeling(SCREAMING_SNAKE_CASE ) if has_lm_head else BeitForImageClassification(SCREAMING_SNAKE_CASE )
model.eval()
model.load_state_dict(SCREAMING_SNAKE_CASE )
# Check outputs on an image
UpperCamelCase__ : List[str] = BeitImageProcessor(
size=config.image_size , resample=PILImageResampling.BILINEAR , do_center_crop=SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Optional[Any] = prepare_img()
UpperCamelCase__ : Union[str, Any] = image_processor(images=SCREAMING_SNAKE_CASE , return_tensors='''pt''' )
UpperCamelCase__ : Union[str, Any] = encoding['''pixel_values''']
UpperCamelCase__ : str = model(SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Any = outputs.logits
# verify logits
UpperCamelCase__ : Dict = [1, 16] if '''rvlcdip''' in checkpoint_url else [1, 196, 8192]
assert logits.shape == torch.Size(SCREAMING_SNAKE_CASE ), "Shape of logits not as expected"
Path(SCREAMING_SNAKE_CASE ).mkdir(exist_ok=SCREAMING_SNAKE_CASE )
print(F"Saving model to {pytorch_dump_folder_path}" )
model.save_pretrained(SCREAMING_SNAKE_CASE )
print(F"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(SCREAMING_SNAKE_CASE )
if push_to_hub:
if has_lm_head:
UpperCamelCase__ : Any = '''dit-base''' if '''base''' in checkpoint_url else '''dit-large'''
else:
UpperCamelCase__ : Optional[Any] = '''dit-base-finetuned-rvlcdip''' if '''dit-b''' in checkpoint_url else '''dit-large-finetuned-rvlcdip'''
image_processor.push_to_hub(
repo_path_or_name=Path(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , organization='''nielsr''' , commit_message='''Add image processor''' , use_temp_dir=SCREAMING_SNAKE_CASE , )
model.push_to_hub(
repo_path_or_name=Path(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , organization='''nielsr''' , commit_message='''Add model''' , use_temp_dir=SCREAMING_SNAKE_CASE , )
if __name__ == "__main__":
__UpperCamelCase : Optional[Any] = argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_url",
default="https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth",
type=str,
help="URL to the original PyTorch checkpoint (.pth file).",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
parser.add_argument(
"--push_to_hub",
action="store_true",
)
__UpperCamelCase : Dict = parser.parse_args()
convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
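# The conversion above boils down to popping each tensor out of the original
# state dict and re-inserting it under its HuggingFace name. A minimal
# illustration of that rename pattern with a made-up two-key state dict:
def _rename_pattern_sketch():
    state_dict = {"norm.weight": torch.ones(4), "norm.bias": torch.zeros(4)}
    for src, dest in [("norm.weight", "layernorm.weight"), ("norm.bias", "layernorm.bias")]:
        # Same move as rename_key() above: pop under the old name,
        # re-insert under the new one; unrelated keys are untouched.
        state_dict[dest] = state_dict.pop(src)
    return state_dict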
| 51 | 0 |
'''simple docstring'''
import unittest
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BridgeTowerImageProcessor
class lowerCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : Any , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : bool = True , __SCREAMING_SNAKE_CASE : Dict[str, int] = None , __SCREAMING_SNAKE_CASE : int = 32 , __SCREAMING_SNAKE_CASE : bool = True , __SCREAMING_SNAKE_CASE : Union[int, float] = 1 / 255 , __SCREAMING_SNAKE_CASE : bool = True , __SCREAMING_SNAKE_CASE : bool = True , __SCREAMING_SNAKE_CASE : Optional[Union[float, List[float]]] = [0.48145466, 0.4578275, 0.40821073] , __SCREAMING_SNAKE_CASE : Optional[Union[float, List[float]]] = [0.26862954, 0.26130258, 0.27577711] , __SCREAMING_SNAKE_CASE : bool = True , __SCREAMING_SNAKE_CASE : Tuple=7 , __SCREAMING_SNAKE_CASE : Any=30 , __SCREAMING_SNAKE_CASE : Optional[Any]=400 , __SCREAMING_SNAKE_CASE : List[str]=3 , ) -> List[Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = parent
__SCREAMING_SNAKE_CASE = do_resize
__SCREAMING_SNAKE_CASE = size if size is not None else {'''shortest_edge''': 288}
__SCREAMING_SNAKE_CASE = size_divisor
__SCREAMING_SNAKE_CASE = do_rescale
__SCREAMING_SNAKE_CASE = rescale_factor
__SCREAMING_SNAKE_CASE = do_normalize
__SCREAMING_SNAKE_CASE = do_center_crop
__SCREAMING_SNAKE_CASE = image_mean
__SCREAMING_SNAKE_CASE = image_std
__SCREAMING_SNAKE_CASE = do_pad
__SCREAMING_SNAKE_CASE = batch_size
__SCREAMING_SNAKE_CASE = num_channels
__SCREAMING_SNAKE_CASE = min_resolution
__SCREAMING_SNAKE_CASE = max_resolution
def UpperCAmelCase__ ( self : str ) -> Any:
"""simple docstring"""
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"size_divisor": self.size_divisor,
}
def UpperCAmelCase__ ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Tuple=False ) -> List[Any]:
"""simple docstring"""
if not batched:
__SCREAMING_SNAKE_CASE = self.size['''shortest_edge''']
__SCREAMING_SNAKE_CASE = image_inputs[0]
if isinstance(lowerCAmelCase__ , Image.Image ):
__SCREAMING_SNAKE_CASE = image.size
else:
__SCREAMING_SNAKE_CASE = image.shape[1], image.shape[2]
__SCREAMING_SNAKE_CASE = size / min(lowerCAmelCase__ , lowerCAmelCase__ )
if h < w:
__SCREAMING_SNAKE_CASE = size, scale * w
else:
__SCREAMING_SNAKE_CASE = scale * h, size
__SCREAMING_SNAKE_CASE = int((1_333 / 800) * size )
if max(lowerCAmelCase__ , lowerCAmelCase__ ) > max_size:
__SCREAMING_SNAKE_CASE = max_size / max(lowerCAmelCase__ , lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE = newh * scale
__SCREAMING_SNAKE_CASE = neww * scale
__SCREAMING_SNAKE_CASE = int(newh + 0.5 ), int(neww + 0.5 )
__SCREAMING_SNAKE_CASE = (
newh // self.size_divisor * self.size_divisor,
neww // self.size_divisor * self.size_divisor,
)
else:
__SCREAMING_SNAKE_CASE = []
for image in image_inputs:
__SCREAMING_SNAKE_CASE = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
__SCREAMING_SNAKE_CASE = max(lowerCAmelCase__ , key=lambda __SCREAMING_SNAKE_CASE : item[0] )[0]
__SCREAMING_SNAKE_CASE = max(lowerCAmelCase__ , key=lambda __SCREAMING_SNAKE_CASE : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class lowerCAmelCase__ ( A__ , unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase__ = BridgeTowerImageProcessor if is_vision_available() else None
def UpperCAmelCase__ ( self : Optional[int] ) -> Any:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = BridgeTowerImageProcessingTester(self )
@property
def UpperCAmelCase__ ( self : Dict ) -> Optional[int]:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCAmelCase__ ( self : List[str] ) -> List[str]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCAmelCase__ , """image_mean""" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , """image_std""" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , """do_normalize""" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , """do_resize""" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , """size""" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , """size_divisor""" ) )
def UpperCAmelCase__ ( self : int ) -> List[str]:
"""simple docstring"""
pass
def UpperCAmelCase__ ( self : Optional[int] ) -> int:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__SCREAMING_SNAKE_CASE = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , Image.Image )
# Test not batched input
__SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
__SCREAMING_SNAKE_CASE = self.image_processor_tester.get_expected_values(lowerCAmelCase__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__SCREAMING_SNAKE_CASE = image_processing(lowerCAmelCase__ , return_tensors="""pt""" ).pixel_values
__SCREAMING_SNAKE_CASE = self.image_processor_tester.get_expected_values(lowerCAmelCase__ , batched=lowerCAmelCase__ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCAmelCase__ ( self : str ) -> Any:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__SCREAMING_SNAKE_CASE = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , numpify=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , np.ndarray )
# Test not batched input
__SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
__SCREAMING_SNAKE_CASE = self.image_processor_tester.get_expected_values(lowerCAmelCase__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__SCREAMING_SNAKE_CASE = image_processing(lowerCAmelCase__ , return_tensors="""pt""" ).pixel_values
__SCREAMING_SNAKE_CASE = self.image_processor_tester.get_expected_values(lowerCAmelCase__ , batched=lowerCAmelCase__ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCAmelCase__ ( self : List[str] ) -> Any:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__SCREAMING_SNAKE_CASE = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , torchify=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , torch.Tensor )
# Test not batched input
__SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
__SCREAMING_SNAKE_CASE = self.image_processor_tester.get_expected_values(lowerCAmelCase__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__SCREAMING_SNAKE_CASE = image_processing(lowerCAmelCase__ , return_tensors="""pt""" ).pixel_values
__SCREAMING_SNAKE_CASE = self.image_processor_tester.get_expected_values(lowerCAmelCase__ , batched=lowerCAmelCase__ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
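# A clean restatement of the resize rule that get_expected_values above encodes:
# scale so the shortest edge hits `size`, cap the longest edge at 1_333/800 * size,
# round half-up, then snap both sides down to a multiple of size_divisor.
# Illustrative helper, not part of the test suite:
def _expected_resize(height: int, width: int, size: int = 288, size_divisor: int = 32):
    scale = size / min(height, width)
    if height < width:
        newh, neww = size, scale * width
    else:
        newh, neww = scale * height, size
    max_size = int((1_333 / 800) * size)
    if max(newh, neww) > max_size:
        rescale = max_size / max(newh, neww)
        newh, neww = newh * rescale, neww * rescale
    newh, neww = int(newh + 0.5), int(neww + 0.5)
    return newh // size_divisor * size_divisor, neww // size_divisor * size_divisor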
| 267 |
from __future__ import annotations
def _UpperCamelCase ( lowercase__ ):
__SCREAMING_SNAKE_CASE : Dict = 0.00
__SCREAMING_SNAKE_CASE : List[str] = 0
for resistor in resistors:
if resistor <= 0:
__SCREAMING_SNAKE_CASE : Any = F'''Resistor at index {index} has a negative or zero value!'''
raise ValueError(lowercase__ )
first_sum += 1 / float(lowercase__ )
index += 1
return 1 / first_sum
def _UpperCamelCase ( lowercase__ ):
__SCREAMING_SNAKE_CASE : Tuple = 0.00
__SCREAMING_SNAKE_CASE : int = 0
for resistor in resistors:
sum_r += resistor
if resistor < 0:
__SCREAMING_SNAKE_CASE : Tuple = F'''Resistor at index {index} has a negative value!'''
raise ValueError(lowercase__ )
index += 1
return sum_r
if __name__ == "__main__":
import doctest
doctest.testmod()
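# Worked check of the two formulas above, restated inline so it runs standalone:
# parallel resistance is the reciprocal of the sum of reciprocals, series
# resistance the plain sum.
resistors = [4.0, 4.0]
parallel = 1 / sum(1 / r for r in resistors)  # 1 / (1/4 + 1/4) = 2.0 ohms
series = sum(resistors)                       # 4.0 + 4.0 = 8.0 ohms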
| 9 | 0 |
"""simple docstring"""
import json
import os
import re
import sys
import urllib.request
import requests
from bsa import BeautifulSoup
lowercase__ = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'
' (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582'
}
def __a ( _SCREAMING_SNAKE_CASE = "dhaka" , _SCREAMING_SNAKE_CASE = 5 ) ->int:
a__: Tuple = min(_SCREAMING_SNAKE_CASE , 50 ) # Prevent abuse!
a__: Union[str, Any] = {
'q': query,
'tbm': 'isch',
'hl': 'en',
'ijn': '0',
}
a__: Tuple = requests.get('https://www.google.com/search' , params=_SCREAMING_SNAKE_CASE , headers=_SCREAMING_SNAKE_CASE )
a__: str = BeautifulSoup(html.text , 'html.parser' )
a__: Dict = ''.join(
re.findall(r'AF_initDataCallback\(([^<]+)\);' , str(soup.select('script' ) ) ) )
a__: int = json.dumps(_SCREAMING_SNAKE_CASE )
a__: List[Any] = json.loads(_SCREAMING_SNAKE_CASE )
a__: Optional[int] = re.findall(
r'\[\"GRID_STATE0\",null,\[\[1,\[0,\".*?\",(.*),\"All\",' , _SCREAMING_SNAKE_CASE , )
if not matched_google_image_data:
return 0
a__: Any = re.sub(
r'\[\"(https\:\/\/encrypted-tbn0\.gstatic\.com\/images\?.*?)\",\d+,\d+\]' , '' , str(_SCREAMING_SNAKE_CASE ) , )
a__: Tuple = re.findall(
r'(?:\'|,),\[\"(https:|http.*?)\",\d+,\d+\]' , _SCREAMING_SNAKE_CASE , )
for index, fixed_full_res_image in enumerate(_SCREAMING_SNAKE_CASE ):
if index >= max_images:
return index
a__: int = bytes(_SCREAMING_SNAKE_CASE , 'ascii' ).decode(
'unicode-escape' )
a__: List[Any] = bytes(_SCREAMING_SNAKE_CASE , 'ascii' ).decode(
'unicode-escape' )
a__: List[Any] = urllib.request.build_opener()
a__: str = [
(
'User-Agent',
'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'
' (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582',
)
]
urllib.request.install_opener(_SCREAMING_SNAKE_CASE )
a__: List[str] = F'query_{query.replace(" " , "_" )}'
if not os.path.exists(_SCREAMING_SNAKE_CASE ):
os.makedirs(_SCREAMING_SNAKE_CASE )
urllib.request.urlretrieve( # noqa: S310
_SCREAMING_SNAKE_CASE , F'{path_name}/original_size_img_{index}.jpg' )
return index
if __name__ == "__main__":
try:
lowercase__ = download_images_from_google_query(sys.argv[1])
print(f"{image_count} images were downloaded to disk.")
except IndexError:
print('Please provide a search term.')
raise
| 203 | """simple docstring"""
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
ARTICLES_REGEX = re.compile(r"\b(a|an|the)\b", re.UNICODE)
OPTS = None
def parse_args():
    parser = argparse.ArgumentParser("Official evaluation script for SQuAD version 2.0.")
    parser.add_argument("data_file", metavar="data.json", help="Input data JSON file.")
    parser.add_argument("pred_file", metavar="pred.json", help="Model predictions.")
    parser.add_argument(
        "--out-file", "-o", metavar="eval.json", help="Write accuracy metrics to file (default is stdout).")
    parser.add_argument(
        "--na-prob-file", "-n", metavar="na_prob.json", help="Model estimates of probability of no answer.")
    parser.add_argument(
        "--na-prob-thresh", "-t", type=float, default=1.0,
        help='Predict "" if no-answer probability exceeds this (default = 1.0).')
    parser.add_argument(
        "--out-image-dir", "-p", metavar="out_images", default=None, help="Save precision-recall curves to directory.")
    parser.add_argument("--verbose", "-v", action="store_true")
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)
    return parser.parse_args()


def make_qid_to_has_ans(dataset):
    qid_to_has_ans = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid_to_has_ans[qa["id"]] = bool(qa["answers"]["text"])
    return qid_to_has_ans
def normalize_answer(s):
    def remove_articles(text):
        return ARTICLES_REGEX.sub(" ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def get_tokens(s):
    if not s:
        return []
    return normalize_answer(s).split()


def compute_exact(a_gold, a_pred):
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))


def compute_f1(a_gold, a_pred):
    gold_toks = get_tokens(a_gold)
    pred_toks = get_tokens(a_pred)
    common = collections.Counter(gold_toks) & collections.Counter(pred_toks)
    num_same = sum(common.values())
    if len(gold_toks) == 0 or len(pred_toks) == 0:
        # If either is no-answer, then F1 is 1 if they agree, 0 otherwise
        return int(gold_toks == pred_toks)
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(pred_toks)
    recall = 1.0 * num_same / len(gold_toks)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1
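# Worked example (editorial): for gold "the cat sat" and prediction
# "cat sat down", normalization drops the article "the", so the shared tokens
# are {"cat", "sat"}: precision = 2/3, recall = 2/2 = 1.0, F1 = 0.8.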
def get_raw_scores(dataset, preds):
    exact_scores = {}
    f1_scores = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid = qa["id"]
                gold_answers = [t for t in qa["answers"]["text"] if normalize_answer(t)]
                if not gold_answers:
                    # For unanswerable questions, only correct answer is empty string
                    gold_answers = [""]
                if qid not in preds:
                    print(f"Missing prediction for {qid}")
                    continue
                a_pred = preds[qid]
                # Take max over all gold answers
                exact_scores[qid] = max(compute_exact(a, a_pred) for a in gold_answers)
                f1_scores[qid] = max(compute_f1(a, a_pred) for a in gold_answers)
    return exact_scores, f1_scores


def apply_no_ans_threshold(scores, na_probs, qid_to_has_ans, na_prob_thresh):
    new_scores = {}
    for qid, s in scores.items():
        pred_na = na_probs[qid] > na_prob_thresh
        if pred_na:
            new_scores[qid] = float(not qid_to_has_ans[qid])
        else:
            new_scores[qid] = s
    return new_scores


def make_eval_dict(exact_scores, f1_scores, qid_list=None):
    if not qid_list:
        total = len(exact_scores)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores.values()) / total),
                ("f1", 100.0 * sum(f1_scores.values()) / total),
                ("total", total),
            ]
        )
    else:
        total = len(qid_list)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores[k] for k in qid_list) / total),
                ("f1", 100.0 * sum(f1_scores[k] for k in qid_list) / total),
                ("total", total),
            ]
        )


def merge_eval(main_eval, new_eval, prefix):
    for k in new_eval:
        main_eval[f"{prefix}_{k}"] = new_eval[k]
def plot_pr_curve(precisions, recalls, out_image, title):
    plt.step(recalls, precisions, color="b", alpha=0.2, where="post")
    plt.fill_between(recalls, precisions, step="post", alpha=0.2, color="b")
    plt.xlabel("Recall")
    plt.ylabel("Precision")
    plt.xlim([0.0, 1.05])
    plt.ylim([0.0, 1.05])
    plt.title(title)
    plt.savefig(out_image)
    plt.clf()


def make_precision_recall_eval(scores, na_probs, num_true_pos, qid_to_has_ans, out_image=None, title=None):
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    true_pos = 0.0
    cur_p = 1.0
    cur_r = 0.0
    precisions = [1.0]
    recalls = [0.0]
    avg_prec = 0.0
    for i, qid in enumerate(qid_list):
        if qid_to_has_ans[qid]:
            true_pos += scores[qid]
        cur_p = true_pos / float(i + 1)
        cur_r = true_pos / float(num_true_pos)
        if i == len(qid_list) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
            # i.e., if we can put a threshold after this point
            avg_prec += cur_p * (cur_r - recalls[-1])
            precisions.append(cur_p)
            recalls.append(cur_r)
    if out_image:
        plot_pr_curve(precisions, recalls, out_image, title)
    return {"ap": 100.0 * avg_prec}


def run_precision_recall_analysis(main_eval, exact_raw, f1_raw, na_probs, qid_to_has_ans, out_image_dir):
    if out_image_dir and not os.path.exists(out_image_dir):
        os.makedirs(out_image_dir)
    num_true_pos = sum(1 for v in qid_to_has_ans.values() if v)
    if num_true_pos == 0:
        return
    pr_exact = make_precision_recall_eval(
        exact_raw,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_exact.png"),
        title="Precision-Recall curve for Exact Match score",
    )
    pr_f1 = make_precision_recall_eval(
        f1_raw,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_f1.png"),
        title="Precision-Recall curve for F1 score",
    )
    oracle_scores = {k: float(v) for k, v in qid_to_has_ans.items()}
    pr_oracle = make_precision_recall_eval(
        oracle_scores,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_oracle.png"),
        title="Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)",
    )
    merge_eval(main_eval, pr_exact, "pr_exact")
    merge_eval(main_eval, pr_f1, "pr_f1")
    merge_eval(main_eval, pr_oracle, "pr_oracle")


def histogram_na_prob(na_probs, qid_list, image_dir, name):
    if not qid_list:
        return
    x = [na_probs[k] for k in qid_list]
    weights = np.ones_like(x) / float(len(x))
    plt.hist(x, weights=weights, bins=20, range=(0.0, 1.0))
    plt.xlabel("Model probability of no-answer")
    plt.ylabel("Proportion of dataset")
    plt.title(f"Histogram of no-answer probability: {name}")
    plt.savefig(os.path.join(image_dir, f"na_prob_hist_{name}.png"))
    plt.clf()
def find_best_thresh(preds, scores, na_probs, qid_to_has_ans):
    num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k])
    cur_score = num_no_ans
    best_score = cur_score
    best_thresh = 0.0
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    for i, qid in enumerate(qid_list):
        if qid not in scores:
            continue
        if qid_to_has_ans[qid]:
            diff = scores[qid]
        else:
            if preds[qid]:
                diff = -1
            else:
                diff = 0
        cur_score += diff
        if cur_score > best_score:
            best_score = cur_score
            best_thresh = na_probs[qid]
    return 100.0 * best_score / len(scores), best_thresh


def find_all_best_thresh(main_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans):
    best_exact, exact_thresh = find_best_thresh(preds, exact_raw, na_probs, qid_to_has_ans)
    best_f1, f1_thresh = find_best_thresh(preds, f1_raw, na_probs, qid_to_has_ans)
    main_eval["best_exact"] = best_exact
    main_eval["best_exact_thresh"] = exact_thresh
    main_eval["best_f1"] = best_f1
    main_eval["best_f1_thresh"] = f1_thresh
def main():
    with open(OPTS.data_file) as f:
        dataset_json = json.load(f)
        dataset = dataset_json["data"]
    with open(OPTS.pred_file) as f:
        preds = json.load(f)
    if OPTS.na_prob_file:
        with open(OPTS.na_prob_file) as f:
            na_probs = json.load(f)
    else:
        na_probs = {k: 0.0 for k in preds}
    qid_to_has_ans = make_qid_to_has_ans(dataset)  # maps qid to True/False
    has_ans_qids = [k for k, v in qid_to_has_ans.items() if v]
    no_ans_qids = [k for k, v in qid_to_has_ans.items() if not v]
    exact_raw, f1_raw = get_raw_scores(dataset, preds)
    exact_thresh = apply_no_ans_threshold(exact_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    f1_thresh = apply_no_ans_threshold(f1_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    out_eval = make_eval_dict(exact_thresh, f1_thresh)
    if has_ans_qids:
        has_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=has_ans_qids)
        merge_eval(out_eval, has_ans_eval, "HasAns")
    if no_ans_qids:
        no_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=no_ans_qids)
        merge_eval(out_eval, no_ans_eval, "NoAns")
    if OPTS.na_prob_file:
        find_all_best_thresh(out_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans)
    if OPTS.na_prob_file and OPTS.out_image_dir:
        run_precision_recall_analysis(out_eval, exact_raw, f1_raw, na_probs, qid_to_has_ans, OPTS.out_image_dir)
        histogram_na_prob(na_probs, has_ans_qids, OPTS.out_image_dir, "hasAns")
        histogram_na_prob(na_probs, no_ans_qids, OPTS.out_image_dir, "noAns")
    if OPTS.out_file:
        with open(OPTS.out_file, "w") as f:
            json.dump(out_eval, f)
    else:
        print(json.dumps(out_eval, indent=2))


if __name__ == "__main__":
    OPTS = parse_args()
    if OPTS.out_image_dir:
        import matplotlib

        matplotlib.use("Agg")
        import matplotlib.pyplot as plt
    main()
| 203 | 1 |
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax.numpy as jnp
from jax import random
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .scheduling_utils_flax import FlaxSchedulerMixin
@flax.struct.dataclass
class KarrasVeSchedulerState:
    # setable values
    num_inference_steps: Optional[int] = None
    timesteps: Optional[jnp.ndarray] = None
    schedule: Optional[jnp.ndarray] = None  # sigma(t_i)

    @classmethod
    def create(cls):
        return cls()


@dataclass
class FlaxKarrasVeOutput(BaseOutput):
    prev_sample: jnp.ndarray
    derivative: jnp.ndarray
    state: KarrasVeSchedulerState


class FlaxKarrasVeScheduler(FlaxSchedulerMixin, ConfigMixin):
    @property
    def has_state(self):
        return True

    @register_to_config
    def __init__(
        self,
        sigma_min: float = 0.02,
        sigma_max: float = 100,
        s_noise: float = 1.007,
        s_churn: float = 80,
        s_min: float = 0.05,
        s_max: float = 50,
    ):
        pass

    def create_state(self):
        return KarrasVeSchedulerState.create()

    def set_timesteps(self, state: KarrasVeSchedulerState, num_inference_steps: int, shape: Tuple = ()):
        timesteps = jnp.arange(0, num_inference_steps)[::-1].copy()
        schedule = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in timesteps
        ]
        return state.replace(
            num_inference_steps=num_inference_steps,
            schedule=jnp.array(schedule, dtype=jnp.float32),
            timesteps=timesteps,
        )
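    # Editorial note (not in the original file): each schedule entry above is
    # sigma_max**2 scaled geometrically toward sigma_min**2; with the default
    # config (sigma_max=100, sigma_min=0.02) and num_inference_steps=3 the
    # entries are {10000.0, 2.0, 0.0004}, indexed later by timestep.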
    def add_noise_to_input(
        self,
        state: KarrasVeSchedulerState,
        sample: jnp.ndarray,
        sigma: float,
        key: random.KeyArray,
    ):
        if self.config.s_min <= sigma <= self.config.s_max:
            gamma = min(self.config.s_churn / state.num_inference_steps, 2**0.5 - 1)
        else:
            gamma = 0
        # sample eps ~ N(0, S_noise^2 * I)
        key = random.split(key, num=1)
        eps = self.config.s_noise * random.normal(key=key, shape=sample.shape)
        sigma_hat = sigma + gamma * sigma
        sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
        return sample_hat, sigma_hat

    def step(
        self,
        state: KarrasVeSchedulerState,
        model_output: jnp.ndarray,
        sigma_hat: float,
        sigma_prev: float,
        sample_hat: jnp.ndarray,
        return_dict: bool = True,
    ):
        pred_original_sample = sample_hat + sigma_hat * model_output
        derivative = (sample_hat - pred_original_sample) / sigma_hat
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative
        if not return_dict:
            return (sample_prev, derivative, state)
        return FlaxKarrasVeOutput(prev_sample=sample_prev, derivative=derivative, state=state)

    def step_correct(
        self,
        state: KarrasVeSchedulerState,
        model_output: jnp.ndarray,
        sigma_hat: float,
        sigma_prev: float,
        sample_hat: jnp.ndarray,
        sample_prev: jnp.ndarray,
        derivative: jnp.ndarray,
        return_dict: bool = True,
    ):
        pred_original_sample = sample_prev + sigma_prev * model_output
        derivative_corr = (sample_prev - pred_original_sample) / sigma_prev
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
        if not return_dict:
            return (sample_prev, derivative, state)
        return FlaxKarrasVeOutput(prev_sample=sample_prev, derivative=derivative, state=state)

    def add_noise(self, state: KarrasVeSchedulerState, original_samples, noise, timesteps):
        raise NotImplementedError()
| 184 |
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax.numpy as jnp
from jax import random
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .scheduling_utils_flax import FlaxSchedulerMixin
@flax.struct.dataclass
class KarrasVeSchedulerState:
    # setable values
    num_inference_steps: Optional[int] = None
    timesteps: Optional[jnp.ndarray] = None
    schedule: Optional[jnp.ndarray] = None  # sigma(t_i)

    @classmethod
    def create(cls):
        return cls()


@dataclass
class FlaxKarrasVeOutput(BaseOutput):
    prev_sample: jnp.ndarray
    derivative: jnp.ndarray
    state: KarrasVeSchedulerState


class FlaxKarrasVeScheduler(FlaxSchedulerMixin, ConfigMixin):
    @property
    def has_state(self):
        return True

    @register_to_config
    def __init__(self, sigma_min: float = 0.02, sigma_max: float = 100, s_noise: float = 1.007, s_churn: float = 80, s_min: float = 0.05, s_max: float = 50):
        pass

    def create_state(self):
        return KarrasVeSchedulerState.create()

    def set_timesteps(self, state: KarrasVeSchedulerState, num_inference_steps: int, shape: Tuple = ()):
        timesteps = jnp.arange(0, num_inference_steps)[::-1].copy()
        schedule = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in timesteps
        ]
        return state.replace(num_inference_steps=num_inference_steps, schedule=jnp.array(schedule, dtype=jnp.float32), timesteps=timesteps)

    def add_noise_to_input(self, state: KarrasVeSchedulerState, sample: jnp.ndarray, sigma: float, key: random.KeyArray):
        if self.config.s_min <= sigma <= self.config.s_max:
            gamma = min(self.config.s_churn / state.num_inference_steps, 2**0.5 - 1)
        else:
            gamma = 0
        # sample eps ~ N(0, S_noise^2 * I)
        key = random.split(key, num=1)
        eps = self.config.s_noise * random.normal(key=key, shape=sample.shape)
        sigma_hat = sigma + gamma * sigma
        sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
        return sample_hat, sigma_hat

    def step(self, state: KarrasVeSchedulerState, model_output: jnp.ndarray, sigma_hat: float, sigma_prev: float, sample_hat: jnp.ndarray, return_dict: bool = True):
        pred_original_sample = sample_hat + sigma_hat * model_output
        derivative = (sample_hat - pred_original_sample) / sigma_hat
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative
        if not return_dict:
            return (sample_prev, derivative, state)
        return FlaxKarrasVeOutput(prev_sample=sample_prev, derivative=derivative, state=state)

    def step_correct(self, state: KarrasVeSchedulerState, model_output: jnp.ndarray, sigma_hat: float, sigma_prev: float, sample_hat: jnp.ndarray, sample_prev: jnp.ndarray, derivative: jnp.ndarray, return_dict: bool = True):
        pred_original_sample = sample_prev + sigma_prev * model_output
        derivative_corr = (sample_prev - pred_original_sample) / sigma_prev
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
        if not return_dict:
            return (sample_prev, derivative, state)
        return FlaxKarrasVeOutput(prev_sample=sample_prev, derivative=derivative, state=state)

    def add_noise(self, state: KarrasVeSchedulerState, original_samples, noise, timesteps):
        raise NotImplementedError()
| 336 | 0 |
import warnings
from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum
from ...processing_utils import ProcessorMixin
if is_torch_available():
    import torch
class DecodeType(ExplicitEnum):
    CHARACTER = "char"
    BPE = "bpe"
    WORDPIECE = "wp"


SUPPORTED_ANNOTATION_FORMATS = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)


class MgpstrProcessor(ProcessorMixin):
    attributes = ["image_processor", "char_tokenizer"]
    image_processor_class = "ViTImageProcessor"
    char_tokenizer_class = "MgpstrTokenizer"

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        self.char_tokenizer = tokenizer
        self.bpe_tokenizer = AutoTokenizer.from_pretrained("gpt2")
        self.wp_tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if images is None and text is None:
            raise ValueError("You need to specify either an `images` or `text` input to process.")
        if images is not None:
            inputs = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if text is not None:
            encodings = self.char_tokenizer(text, return_tensors=return_tensors, **kwargs)
        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, sequences):
        char_preds, bpe_preds, wp_preds = sequences
        batch_size = char_preds.size(0)

        char_strs, char_scores = self._decode_helper(char_preds, "char")
        bpe_strs, bpe_scores = self._decode_helper(bpe_preds, "bpe")
        wp_strs, wp_scores = self._decode_helper(wp_preds, "wp")

        final_strs = []
        final_scores = []
        for i in range(batch_size):
            scores = [char_scores[i], bpe_scores[i], wp_scores[i]]
            strs = [char_strs[i], bpe_strs[i], wp_strs[i]]
            max_score_index = scores.index(max(scores))
            final_strs.append(strs[max_score_index])
            final_scores.append(scores[max_score_index])

        out = {}
        out["generated_text"] = final_strs
        out["scores"] = final_scores
        out["char_preds"] = char_strs
        out["bpe_preds"] = bpe_strs
        out["wp_preds"] = wp_strs
        return out

    def _decode_helper(self, pred_logits, format):
        if format == DecodeType.CHARACTER:
            decoder = self.char_decode
            eos_token = 1
            eos_str = "[s]"
        elif format == DecodeType.BPE:
            decoder = self.bpe_decode
            eos_token = 2
            eos_str = "#"
        elif format == DecodeType.WORDPIECE:
            decoder = self.wp_decode
            eos_token = 102
            eos_str = "[SEP]"
        else:
            raise ValueError(f"Format {format} is not supported.")

        dec_strs, conf_scores = [], []
        batch_size = pred_logits.size(0)
        batch_max_length = pred_logits.size(1)
        _, preds_index = pred_logits.topk(1, dim=-1, largest=True, sorted=True)
        preds_index = preds_index.view(-1, batch_max_length)[:, 1:]
        preds_str = decoder(preds_index)
        preds_max_prob, _ = torch.nn.functional.softmax(pred_logits, dim=2).max(dim=2)
        preds_max_prob = preds_max_prob[:, 1:]

        for index in range(batch_size):
            pred_eos = preds_str[index].find(eos_str)
            pred = preds_str[index][:pred_eos]
            pred_index = preds_index[index].cpu().tolist()
            pred_eos_index = pred_index.index(eos_token) if eos_token in pred_index else -1
            pred_max_prob = preds_max_prob[index][: pred_eos_index + 1]
            confidence_score = pred_max_prob.cumprod(dim=0)[-1] if pred_max_prob.nelement() != 0 else 0.0
            dec_strs.append(pred)
            conf_scores.append(confidence_score)

        return dec_strs, conf_scores

    def char_decode(self, sequences):
        decode_strs = [seq.replace(" ", "") for seq in self.char_tokenizer.batch_decode(sequences)]
        return decode_strs

    def bpe_decode(self, sequences):
        return self.bpe_tokenizer.batch_decode(sequences)

    def wp_decode(self, sequences):
        decode_strs = [seq.replace(" ", "") for seq in self.wp_tokenizer.batch_decode(sequences)]
        return decode_strs
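    # Usage sketch (editorial; tensors are hypothetical): `batch_decode` takes
    # a 3-tuple of per-head logits (char, bpe, wp) and keeps, per sample, the
    # decoding whose cumulative-softmax confidence is highest, e.g.
    #   out = processor.batch_decode((char_logits, bpe_logits, wp_logits))
    #   out["generated_text"]  # best string per sample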
| 361 |
import hashlib
import unittest
from typing import Dict
import numpy as np
from transformers import (
MODEL_FOR_MASK_GENERATION_MAPPING,
TF_MODEL_FOR_MASK_GENERATION_MAPPING,
is_vision_available,
pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
if is_vision_available():
    from PIL import Image
else:

    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass


def hashimage(image: Image) -> str:
    m = hashlib.md5(image.tobytes())
    return m.hexdigest()[:10]


def mask_to_test_readable(mask: Image) -> Dict:
    npimg = np.array(mask)
    shape = npimg.shape
    return {"hash": hashimage(mask), "shape": shape}
@is_pipeline_test
@require_vision
@require_torch
class MaskGenerationPipelineTests(unittest.TestCase):
    model_mapping = dict(
        (list(MODEL_FOR_MASK_GENERATION_MAPPING.items()) if MODEL_FOR_MASK_GENERATION_MAPPING else [])
    )
    tf_model_mapping = dict(
        (list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items()) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else [])
    )

    def get_test_pipeline(self, model, tokenizer, processor):
        image_segmenter = MaskGenerationPipeline(model=model, image_processor=processor)
        return image_segmenter, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]

    def run_pipeline_test(self, mask_generator, examples):
        pass

    @require_tf
    @unittest.skip("Image segmentation not implemented in TF")
    def test_small_model_tf(self):
        pass

    @slow
    @require_torch
    def test_large_model_pt(self):
        image_segmenter = pipeline("mask-generation", model="facebook/sam-vit-huge")
        outputs = image_segmenter("http://images.cocodataset.org/val2017/000000039769.jpg", points_per_batch=256)

        # Shortening by hashing
        new_output = []
        for i, o in enumerate(outputs["masks"]):
            new_output += [{"mask": mask_to_test_readable(o), "scores": outputs["scores"][i]}]

        # fmt: off
        self.assertEqual(
            nested_simplify(new_output, decimals=4),
            [
{"""mask""": {"""hash""": """115ad19f5f""", """shape""": (4_80, 6_40)}, """scores""": 1.0444},
{"""mask""": {"""hash""": """6affa964c6""", """shape""": (4_80, 6_40)}, """scores""": 1.021},
{"""mask""": {"""hash""": """dfe28a0388""", """shape""": (4_80, 6_40)}, """scores""": 1.0167},
{"""mask""": {"""hash""": """c0a5f4a318""", """shape""": (4_80, 6_40)}, """scores""": 1.0132},
{"""mask""": {"""hash""": """fe8065c197""", """shape""": (4_80, 6_40)}, """scores""": 1.0053},
{"""mask""": {"""hash""": """e2d0b7a0b7""", """shape""": (4_80, 6_40)}, """scores""": 0.9967},
{"""mask""": {"""hash""": """453c7844bd""", """shape""": (4_80, 6_40)}, """scores""": 0.993},
{"""mask""": {"""hash""": """3d44f2926d""", """shape""": (4_80, 6_40)}, """scores""": 0.9909},
{"""mask""": {"""hash""": """64033ddc3f""", """shape""": (4_80, 6_40)}, """scores""": 0.9879},
{"""mask""": {"""hash""": """801064ff79""", """shape""": (4_80, 6_40)}, """scores""": 0.9834},
{"""mask""": {"""hash""": """6172f276ef""", """shape""": (4_80, 6_40)}, """scores""": 0.9716},
{"""mask""": {"""hash""": """b49e60e084""", """shape""": (4_80, 6_40)}, """scores""": 0.9612},
{"""mask""": {"""hash""": """a811e775fd""", """shape""": (4_80, 6_40)}, """scores""": 0.9599},
{"""mask""": {"""hash""": """a6a8ebcf4b""", """shape""": (4_80, 6_40)}, """scores""": 0.9552},
{"""mask""": {"""hash""": """9d8257e080""", """shape""": (4_80, 6_40)}, """scores""": 0.9532},
{"""mask""": {"""hash""": """32de6454a8""", """shape""": (4_80, 6_40)}, """scores""": 0.9516},
{"""mask""": {"""hash""": """af3d4af2c8""", """shape""": (4_80, 6_40)}, """scores""": 0.9499},
{"""mask""": {"""hash""": """3c6db475fb""", """shape""": (4_80, 6_40)}, """scores""": 0.9483},
{"""mask""": {"""hash""": """c290813fb9""", """shape""": (4_80, 6_40)}, """scores""": 0.9464},
{"""mask""": {"""hash""": """b6f0b8f606""", """shape""": (4_80, 6_40)}, """scores""": 0.943},
{"""mask""": {"""hash""": """92ce16bfdf""", """shape""": (4_80, 6_40)}, """scores""": 0.943},
{"""mask""": {"""hash""": """c749b25868""", """shape""": (4_80, 6_40)}, """scores""": 0.9408},
{"""mask""": {"""hash""": """efb6cab859""", """shape""": (4_80, 6_40)}, """scores""": 0.9335},
{"""mask""": {"""hash""": """1ff2eafb30""", """shape""": (4_80, 6_40)}, """scores""": 0.9326},
{"""mask""": {"""hash""": """788b798e24""", """shape""": (4_80, 6_40)}, """scores""": 0.9262},
{"""mask""": {"""hash""": """abea804f0e""", """shape""": (4_80, 6_40)}, """scores""": 0.8999},
{"""mask""": {"""hash""": """7b9e8ddb73""", """shape""": (4_80, 6_40)}, """scores""": 0.8986},
{"""mask""": {"""hash""": """cd24047c8a""", """shape""": (4_80, 6_40)}, """scores""": 0.8984},
{"""mask""": {"""hash""": """6943e6bcbd""", """shape""": (4_80, 6_40)}, """scores""": 0.8873},
{"""mask""": {"""hash""": """b5f47c9191""", """shape""": (4_80, 6_40)}, """scores""": 0.8871}
] , )
# fmt: on
    @require_torch
    @slow
    def test_threshold(self):
        model_id = "facebook/sam-vit-huge"
        image_segmenter = pipeline("mask-generation", model=model_id)
        outputs = image_segmenter(
            "http://images.cocodataset.org/val2017/000000039769.jpg", pred_iou_thresh=1, points_per_batch=256
        )

        # Shortening by hashing
        new_output = []
        for i, o in enumerate(outputs["masks"]):
            new_output += [{"mask": mask_to_test_readable(o), "scores": outputs["scores"][i]}]

        self.assertEqual(
            nested_simplify(new_output, decimals=4),
            [
{"""mask""": {"""hash""": """115ad19f5f""", """shape""": (4_80, 6_40)}, """scores""": 1.0444},
{"""mask""": {"""hash""": """6affa964c6""", """shape""": (4_80, 6_40)}, """scores""": 1.0210},
{"""mask""": {"""hash""": """dfe28a0388""", """shape""": (4_80, 6_40)}, """scores""": 1.0167},
{"""mask""": {"""hash""": """c0a5f4a318""", """shape""": (4_80, 6_40)}, """scores""": 1.0132},
{"""mask""": {"""hash""": """fe8065c197""", """shape""": (4_80, 6_40)}, """scores""": 1.0053},
] , )
| 178 | 0 |
from __future__ import annotations
def all_construct(target: str, word_bank: list[str] | None = None) -> list[list[str]]:
    word_bank = word_bank or []
    # create a table
    table_size = len(target) + 1
    table = []
    for _ in range(table_size):
        table.append([])
    # seed value
    table[0] = [[]]  # because empty string has empty combination

    # iterate through the indices
    for i in range(table_size):
        # condition
        if table[i] != []:
            for word in word_bank:
                # slice condition
                if target[i : i + len(word)] == word:
                    new_combinations = [
                        [word, *way] for way in table[i]
                    ]
                    # adds the word to every combination the current position holds
                    # now,push that combination to the table[i+len(word)]
                    table[i + len(word)] += new_combinations

    # combinations are in reverse order so reverse for better output
    for combination in table[len(target)]:
        combination.reverse()

    return table[len(target)]


if __name__ == "__main__":
    print(all_construct("jwajalapa", ["jwa", "j", "w", "a", "la", "lapa"]))
    print(all_construct("rajamati", ["s", "raj", "amat", "raja", "ma", "i", "t"]))
    print(
        all_construct(
            "hexagonosaurus",
            ["h", "ex", "hex", "ag", "ago", "ru", "auru", "rus", "go", "no", "o", "s"],
        )
    )
| 19 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "andreasmadsen/efficient_mlm_m0.40": (
        "https://huggingface.co/andreasmadsen/efficient_mlm_m0.40/resolve/main/config.json"
    ),
}


class RobertaPreLayerNormConfig(PretrainedConfig):
    model_type = "roberta-prelayernorm"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class RobertaPreLayerNormOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
| 314 | 0 |
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
NON_ALPHA = re.compile("[^A-Za-z_0-9]")
# parameters used in DuplicationIndex
MIN_NUM_TOKENS = 10
NUM_PERM = 256


def get_min_hash(tokens: List[str]) -> Optional[MinHash]:
    if len(tokens) < MIN_NUM_TOKENS:
        return None
    min_hash = MinHash(num_perm=NUM_PERM)
    for token in set(tokens):
        min_hash.update(token.encode())
    return min_hash


def get_tokens(code: str) -> Set[str]:
    return {t for t in NON_ALPHA.split(code) if len(t.strip()) > 0}


class DuplicationIndex:
    def __init__(self, *, duplication_jaccard_threshold: float = 0.85):
        self._duplication_jaccard_threshold = duplication_jaccard_threshold
        self._num_perm = NUM_PERM
        self._index = MinHashLSH(threshold=self._duplication_jaccard_threshold, num_perm=self._num_perm)
        self._duplicate_clusters = defaultdict(set)

    def add(self, code_key: Tuple, min_hash: MinHash) -> None:
        close_duplicates = self._index.query(min_hash)
        if code_key in self._index.keys:
            print(f"Duplicate key {code_key}")
            return
        self._index.insert(code_key, min_hash)
        if len(close_duplicates) > 0:
            for base_duplicate in close_duplicates:
                if base_duplicate in self._duplicate_clusters:
                    self._duplicate_clusters[base_duplicate].add(code_key)
                    break
            else:
                self._duplicate_clusters[close_duplicates[0]].add(code_key)

    def get_duplicate_clusters(self) -> List[List[Dict]]:
        duplicate_clusters = []
        for base, duplicates in self._duplicate_clusters.items():
            cluster = [base] + list(duplicates)
            # reformat the cluster to be a list of dict
            cluster = [{"base_index": el[0], "repo_name": el[1], "path": el[2]} for el in cluster]
            duplicate_clusters.append(cluster)
        return duplicate_clusters

    def save(self, filepath) -> None:
        duplicate_clusters = self.get_duplicate_clusters()
        with open(filepath, "w") as f:
            json.dump(duplicate_clusters, f)


def _compute_min_hash(element):
    index, data = element
    min_hash = get_min_hash([t for t in NON_ALPHA.split(data["content"]) if len(t.strip()) > 0])
    if min_hash is not None:
        return (index, data["repo_name"], data["path"]), min_hash


def minhash_iter(dataset_iterator: Type[Dataset]):
    with mp.Pool() as pool:
        for data in pool.imap_unordered(
            _compute_min_hash,
            ThreadedIterator(dataset_iterator, max_queue_size=10000),
            chunksize=100,
        ):
            if data is not None:
                yield data


def make_duplicate_clusters(dataset_iterator: Type[Dataset], jaccard_threshold: float):
    di = DuplicationIndex(duplication_jaccard_threshold=jaccard_threshold)
    for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(dataset_iterator)), max_queue_size=100)):
        di.add(filename, min_hash)
    # Returns a List[Cluster] where Cluster is List[str] with the filenames.
    return di.get_duplicate_clusters()


def jaccard_similarity(code1: str, code2: str) -> float:
    tokens1 = get_tokens(code1)
    tokens2 = get_tokens(code2)
    return len(tokens1 & tokens2) / len(tokens1 | tokens2)
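# Worked example (editorial): "def foo(a): return a" vs "def foo(b): return b"
# tokenize to {def, foo, a, return} and {def, foo, b, return}; 3 shared tokens
# out of 5 distinct gives a Jaccard similarity of 0.6.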
_shared_dataset = None


def _find_cluster_extremes_shared(cluster, jaccard_threshold):
    extremes = []
    for element1 in cluster:
        code1 = _shared_dataset[element1["base_index"]]["content"]
        for element2 in extremes:
            code2 = _shared_dataset[element2["base_index"]]["content"]
            if jaccard_similarity(code1, code2) >= jaccard_threshold:
                element2["copies"] += 1
                break
        else:
            element1["copies"] = 1
            extremes.append(element1)
    return extremes


def find_extremes(cluster_list, dataset, jaccard_threshold):
    global _shared_dataset
    _shared_dataset = dataset
    extremes_list = []
    f = partial(_find_cluster_extremes_shared, jaccard_threshold=jaccard_threshold)
    with mp.Pool() as pool:
        for extremes in tqdm(
            pool.imap_unordered(f, cluster_list),
            total=len(cluster_list),
        ):
            extremes_list.append(extremes)
    return extremes_list


def deduplicate_dataset(dataset: Type[Dataset], jaccard_threshold: float = 0.85) -> Tuple[Type[Dataset], List[List[Dict]]]:
    duplicate_clusters = make_duplicate_clusters(dataset, jaccard_threshold)
    duplicate_indices = {x["base_index"] for cluster in duplicate_clusters for x in cluster}
    extreme_dict = {}
    extremes_clusters = find_extremes(duplicate_clusters, dataset, jaccard_threshold)
    for extremes in extremes_clusters:
        for element in extremes:
            extreme_dict[element["base_index"]] = element
    remove_indices = duplicate_indices - set(extreme_dict.keys())
    ds_filter = dataset.filter(lambda x, idx: idx not in remove_indices, with_indices=True)

    # update duplicate_clusters
    for cluster in duplicate_clusters:
        for element in cluster:
            element["is_extreme"] = element["base_index"] in extreme_dict
            if element["is_extreme"]:
                element["copies"] = extreme_dict[element["base_index"]]["copies"]

    print(f"Original dataset size: {len(dataset)}")
    print(f"Number of duplicate clusters: {len(duplicate_clusters)}")
    print(f"Files in duplicate cluster: {len(duplicate_indices)}")
    print(f"Unique files in duplicate cluster: {len(extreme_dict)}")
    print(f"Filtered dataset size: {len(ds_filter)}")
    return ds_filter, duplicate_clusters
| 351 |
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
logger = logging.get_logger(__name__)


class CLIPFeatureExtractor(CLIPImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use CLIPImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
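# Migration sketch (editorial): new code should use the replacement class
# directly, e.g.
#   from transformers import CLIPImageProcessor
#   image_processor = CLIPImageProcessor.from_pretrained("openai/clip-vit-base-patch32")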
| 146 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_vit_msn": ["VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMSNConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vit_msn"] = [
        "VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ViTMSNModel",
        "ViTMSNForImageClassification",
        "ViTMSNPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vit_msn import (
            VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
            ViTMSNForImageClassification,
            ViTMSNModel,
            ViTMSNPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 2 |
import math
def real_power(apparent_power: float, power_factor: float) -> float:
    if (
        not isinstance(power_factor, (int, float))
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError("power_factor must be a valid float value between -1 and 1.")
    return apparent_power * power_factor


def reactive_power(apparent_power: float, power_factor: float) -> float:
    if (
        not isinstance(power_factor, (int, float))
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError("power_factor must be a valid float value between -1 and 1.")
    return apparent_power * math.sqrt(1 - power_factor**2)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
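# Usage sketch (illustrative values, editorial addition): for 100 VA of
# apparent power at power factor 0.9,
#   real_power(100, 0.9)      -> 90.0     (P = S * pf)
#   reactive_power(100, 0.9)  -> ~43.589  (Q = S * sqrt(1 - pf**2))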
| 333 | 0 |
from ...configuration_utils import PretrainedConfig
class BertGenerationConfig(PretrainedConfig):
    model_type = "bert-generation"

    def __init__(
        self,
        vocab_size=50358,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=1,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
| 177 |
from __future__ import annotations
import os
from collections.abc import Mapping
EdgeT = tuple[int, int]


class Graph:
    def __init__(self, vertices: set[int], edges: Mapping[EdgeT, int]) -> None:
        self.vertices: set[int] = vertices
        self.edges: dict[EdgeT, int] = {
            (min(edge), max(edge)): weight for edge, weight in edges.items()
        }

    def add_edge(self, edge: EdgeT, weight: int) -> None:
        self.vertices.add(edge[0])
        self.vertices.add(edge[1])
        self.edges[(min(edge), max(edge))] = weight

    def prims_algorithm(self) -> "Graph":
        subgraph = Graph({min(self.vertices)}, {})
        min_edge: EdgeT
        min_weight: int
        edge: EdgeT
        weight: int

        while len(subgraph.vertices) < len(self.vertices):
            min_weight = max(self.edges.values()) + 1
            for edge, weight in self.edges.items():
                if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
                    if weight < min_weight:
                        min_edge = edge
                        min_weight = weight
            subgraph.add_edge(min_edge, min_weight)
        return subgraph


def solution(filename: str = "p107_network.txt") -> int:
    script_dir: str = os.path.abspath(os.path.dirname(__file__))
    network_file: str = os.path.join(script_dir, filename)
    edges: dict[EdgeT, int] = {}
    data: list[str]
    edge1: int
    edge2: int

    with open(network_file) as f:
        data = f.read().strip().split("\n")

    adjaceny_matrix = [line.split(",") for line in data]

    for edge1 in range(1, len(adjaceny_matrix)):
        for edge2 in range(edge1):
            if adjaceny_matrix[edge1][edge2] != "-":
                edges[(edge2, edge1)] = int(adjaceny_matrix[edge1][edge2])

    graph = Graph(set(range(len(adjaceny_matrix))), edges)
    subgraph = graph.prims_algorithm()

    initial_total: int = sum(graph.edges.values())
    optimal_total: int = sum(subgraph.edges.values())

    return initial_total - optimal_total


if __name__ == "__main__":
    print(f"{solution() = }")
| 177 | 1 |
import os
from collections.abc import Iterator
def good_file_paths(top_dir: str = ".") -> Iterator[str]:
    for dir_path, dir_names, filenames in os.walk(top_dir):
        dir_names[:] = [d for d in dir_names if d != "scripts" and d[0] not in "._"]
        for filename in filenames:
            if filename == "__init__.py":
                continue
            if os.path.splitext(filename)[1] in (".py", ".ipynb"):
                yield os.path.join(dir_path, filename).lstrip("./")


def md_prefix(i) -> str:
    return f"{i * '  '}*" if i else "\n##"


def print_path(old_path: str, new_path: str) -> str:
    old_parts = old_path.split(os.sep)
    for i, new_part in enumerate(new_path.split(os.sep)):
        if (i + 1 > len(old_parts) or old_parts[i] != new_part) and new_part:
            print(f"{md_prefix(i)} {new_part.replace('_', ' ').title()}")
    return new_path


def print_directory_md(top_dir: str = ".") -> None:
    old_path = ""
    for filepath in sorted(good_file_paths(top_dir)):
        filepath, filename = os.path.split(filepath)
        if filepath != old_path:
            old_path = print_path(old_path, filepath)
        indent = (filepath.count(os.sep) + 1) if filepath else 0
        url = f"{filepath}/{filename}".replace(" ", "%20")
        filename = os.path.splitext(filename.replace("_", " ").title())[0]
        print(f"{md_prefix(indent)} [{filename}]({url})")


if __name__ == "__main__":
    print_directory_md(".")
| 281 |
import importlib
import os
import fsspec
import pytest
from fsspec import register_implementation
from fsspec.registry import _registry as _fsspec_registry
from datasets.filesystems import COMPRESSION_FILESYSTEMS, HfFileSystem, extract_path_from_uri, is_remote_filesystem
from .utils import require_lz4, require_zstandard
def test_mockfs(mockfs):
    assert "mock" in _fsspec_registry
    assert "bz2" in _fsspec_registry


def test_non_mockfs():
    assert "mock" not in _fsspec_registry
    assert "bz2" in _fsspec_registry


def test_extract_path_from_uri():
    mock_bucket = "mock-s3-bucket"
    dataset_path = f"s3://{mock_bucket}"
    dataset_path = extract_path_from_uri(dataset_path)
    assert dataset_path.startswith("s3://") is False

    dataset_path = "./local/path"
    new_dataset_path = extract_path_from_uri(dataset_path)
    assert dataset_path == new_dataset_path


def test_is_remote_filesystem(mockfs):
    is_remote = is_remote_filesystem(mockfs)
    assert is_remote is True

    fs = fsspec.filesystem("file")
    is_remote = is_remote_filesystem(fs)
    assert is_remote is False


@pytest.mark.parametrize("compression_fs_class", COMPRESSION_FILESYSTEMS)
def test_compression_filesystems(compression_fs_class, gz_file, bz2_file, lz4_file, zstd_file, xz_file, text_file):
    input_paths = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_file, "bz2": bz2_file, "lz4": lz4_file}
    input_path = input_paths[compression_fs_class.protocol]
    if input_path is None:
        reason = f"for '{compression_fs_class.protocol}' compression protocol, "
        if compression_fs_class.protocol == "lz4":
            reason += require_lz4.kwargs["reason"]
        elif compression_fs_class.protocol == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason)
    fs = fsspec.filesystem(compression_fs_class.protocol, fo=input_path)
    assert isinstance(fs, compression_fs_class)
    expected_filename = os.path.basename(input_path)
    expected_filename = expected_filename[: expected_filename.rindex(".")]
    assert fs.glob("*") == [expected_filename]
    with fs.open(expected_filename, "r", encoding="utf-8") as f, open(text_file, encoding="utf-8") as expected_file:
        assert f.read() == expected_file.read()


@pytest.mark.parametrize("protocol", ["zip", "gzip"])
def test_fs_isfile(protocol, zip_jsonl_path, jsonl_gz_path):
    compressed_file_paths = {"zip": zip_jsonl_path, "gzip": jsonl_gz_path}
    compressed_file_path = compressed_file_paths[protocol]
    member_file_path = "dataset.jsonl"
    path = f"{protocol}://{member_file_path}::{compressed_file_path}"
    fs, *_ = fsspec.get_fs_token_paths(path)
    assert fs.isfile(member_file_path)
    assert not fs.isfile("non_existing_" + member_file_path)


@pytest.mark.integration
def test_hf_filesystem(hf_token, hf_api, hf_private_dataset_repo_txt_data, text_file):
    repo_info = hf_api.dataset_info(hf_private_dataset_repo_txt_data, token=hf_token)
    hffs = HfFileSystem(repo_info=repo_info, token=hf_token)
    assert sorted(hffs.glob("*")) == [".gitattributes", "data"]
    assert hffs.isdir("data")
    assert hffs.isfile(".gitattributes") and hffs.isfile("data/text_data.txt")
    with open(text_file) as f:
        assert hffs.open("data/text_data.txt", "r").read() == f.read()


def test_fs_overwrites():
    protocol = "bz2"

    # Import module
    import datasets.filesystems

    # Overwrite protocol and reload
    register_implementation(protocol, None, clobber=True)
    with pytest.warns(UserWarning) as warning_info:
        importlib.reload(datasets.filesystems)

    assert len(warning_info) == 1
    assert (
        str(warning_info[0].message)
        == f"A filesystem protocol was already set for {protocol} and will be overwritten."
    )
| 281 | 1 |
'''simple docstring'''
from __future__ import annotations
from decimal import Decimal
from numpy import array
def inverse_of_matrix(matrix: list[list[float]]) -> list[list[float]]:
    d = Decimal

    # Check if the provided matrix has 2 rows and 2 columns
    # since this implementation only works for 2x2 matrices
    if len(matrix) == 2 and len(matrix[0]) == 2 and len(matrix[1]) == 2:
        # Calculate the determinant of the matrix
        determinant = float(
            d(matrix[0][0]) * d(matrix[1][1]) - d(matrix[1][0]) * d(matrix[0][1]))
        if determinant == 0:
            raise ValueError("This matrix has no inverse.")

        # Creates a copy of the matrix with swapped positions of the elements
        swapped_matrix = [[0.0, 0.0], [0.0, 0.0]]
        swapped_matrix[0][0], swapped_matrix[1][1] = matrix[1][1], matrix[0][0]
        swapped_matrix[1][0], swapped_matrix[0][1] = -matrix[1][0], -matrix[0][1]

        # Calculate the inverse of the matrix
        return [
            [(float(d(n)) / determinant) or 0.0 for n in row] for row in swapped_matrix
        ]
    elif (
        len(matrix) == 3
        and len(matrix[0]) == 3
        and len(matrix[1]) == 3
        and len(matrix[2]) == 3
    ):
        # Calculate the determinant of the matrix using Sarrus rule
        determinant = float(
            (
                (d(matrix[0][0]) * d(matrix[1][1]) * d(matrix[2][2]))
                + (d(matrix[0][1]) * d(matrix[1][2]) * d(matrix[2][0]))
                + (d(matrix[0][2]) * d(matrix[1][0]) * d(matrix[2][1]))
            )
            - (
                (d(matrix[0][2]) * d(matrix[1][1]) * d(matrix[2][0]))
                + (d(matrix[0][1]) * d(matrix[1][0]) * d(matrix[2][2]))
                + (d(matrix[0][0]) * d(matrix[1][2]) * d(matrix[2][1]))
            )
        )
        if determinant == 0:
            raise ValueError("This matrix has no inverse.")

        # Creating cofactor matrix
        cofactor_matrix = [
            [d(0.0), d(0.0), d(0.0)],
            [d(0.0), d(0.0), d(0.0)],
            [d(0.0), d(0.0), d(0.0)],
        ]
        cofactor_matrix[0][0] = (d(matrix[1][1]) * d(matrix[2][2])) - (
            d(matrix[1][2]) * d(matrix[2][1])
        )
        cofactor_matrix[0][1] = -(
            (d(matrix[1][0]) * d(matrix[2][2])) - (d(matrix[1][2]) * d(matrix[2][0]))
        )
        cofactor_matrix[0][2] = (d(matrix[1][0]) * d(matrix[2][1])) - (
            d(matrix[1][1]) * d(matrix[2][0])
        )
        cofactor_matrix[1][0] = -(
            (d(matrix[0][1]) * d(matrix[2][2])) - (d(matrix[0][2]) * d(matrix[2][1]))
        )
        cofactor_matrix[1][1] = (d(matrix[0][0]) * d(matrix[2][2])) - (
            d(matrix[0][2]) * d(matrix[2][0])
        )
        cofactor_matrix[1][2] = -(
            (d(matrix[0][0]) * d(matrix[2][1])) - (d(matrix[0][1]) * d(matrix[2][0]))
        )
        cofactor_matrix[2][0] = (d(matrix[0][1]) * d(matrix[1][2])) - (
            d(matrix[0][2]) * d(matrix[1][1])
        )
        cofactor_matrix[2][1] = -(
            (d(matrix[0][0]) * d(matrix[1][2])) - (d(matrix[0][2]) * d(matrix[1][0]))
        )
        cofactor_matrix[2][2] = (d(matrix[0][0]) * d(matrix[1][1])) - (
            d(matrix[0][1]) * d(matrix[1][0])
        )

        # Transpose the cofactor matrix (Adjoint matrix)
        adjoint_matrix = array(cofactor_matrix)
        for i in range(3):
            for j in range(3):
                adjoint_matrix[i][j] = cofactor_matrix[j][i]

        # Inverse of the matrix using the formula (1/determinant) * adjoint matrix
        inverse_matrix = array(adjoint_matrix)
        for i in range(3):
            for j in range(3):
                inverse_matrix[i][j] /= d(determinant)

        # Calculate the inverse of the matrix
        return [[float(d(n)) or 0.0 for n in row] for row in inverse_matrix]
    raise ValueError("Please provide a matrix of size 2x2 or 3x3.")
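# Usage sketch (illustrative values, editorial addition):
#   inverse_of_matrix([[2.0, 5.0], [1.0, 3.0]]) -> [[3.0, -5.0], [-1.0, 2.0]]
# since the determinant is 2*3 - 5*1 = 1.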
| 48 |
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase = logging.get_logger(__name__)
lowerCamelCase = {
"""microsoft/wavlm-base""": """https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json""",
# See all WavLM models at https://huggingface.co/models?filter=wavlm
}
class _UpperCamelCase ( A ):
'''simple docstring'''
lowerCAmelCase__ = """wavlm"""
def __init__( self : List[str] , _lowerCAmelCase : List[Any]=3_2 , _lowerCAmelCase : int=7_6_8 , _lowerCAmelCase : Any=1_2 , _lowerCAmelCase : Union[str, Any]=1_2 , _lowerCAmelCase : List[Any]=3_0_7_2 , _lowerCAmelCase : Dict="gelu" , _lowerCAmelCase : Any=0.1 , _lowerCAmelCase : Any=0.1 , _lowerCAmelCase : Optional[Any]=0.1 , _lowerCAmelCase : List[Any]=0.0 , _lowerCAmelCase : str=0.1 , _lowerCAmelCase : Dict=0.1 , _lowerCAmelCase : List[Any]=0.02 , _lowerCAmelCase : Dict=1e-5 , _lowerCAmelCase : List[Any]="group" , _lowerCAmelCase : Optional[Any]="gelu" , _lowerCAmelCase : Dict=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , _lowerCAmelCase : Any=(5, 2, 2, 2, 2, 2, 2) , _lowerCAmelCase : Optional[Any]=(1_0, 3, 3, 3, 3, 2, 2) , _lowerCAmelCase : Optional[int]=False , _lowerCAmelCase : int=1_2_8 , _lowerCAmelCase : Tuple=1_6 , _lowerCAmelCase : Optional[int]=3_2_0 , _lowerCAmelCase : Union[str, Any]=8_0_0 , _lowerCAmelCase : Optional[Any]=False , _lowerCAmelCase : Union[str, Any]=True , _lowerCAmelCase : Any=0.05 , _lowerCAmelCase : List[Any]=1_0 , _lowerCAmelCase : Any=2 , _lowerCAmelCase : List[Any]=0.0 , _lowerCAmelCase : Union[str, Any]=1_0 , _lowerCAmelCase : List[Any]=3_2_0 , _lowerCAmelCase : int=2 , _lowerCAmelCase : Dict=0.1 , _lowerCAmelCase : Optional[int]=1_0_0 , _lowerCAmelCase : Tuple=2_5_6 , _lowerCAmelCase : Union[str, Any]=2_5_6 , _lowerCAmelCase : Any=0.1 , _lowerCAmelCase : Tuple="mean" , _lowerCAmelCase : Any=False , _lowerCAmelCase : Union[str, Any]=False , _lowerCAmelCase : Any=2_5_6 , _lowerCAmelCase : Tuple=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 1_5_0_0) , _lowerCAmelCase : Dict=(5, 3, 3, 1, 1) , _lowerCAmelCase : Dict=(1, 2, 3, 1, 1) , _lowerCAmelCase : int=5_1_2 , _lowerCAmelCase : Optional[int]=8_0 , _lowerCAmelCase : Any=0 , _lowerCAmelCase : int=1 , _lowerCAmelCase : Tuple=2 , _lowerCAmelCase : List[str]=False , _lowerCAmelCase : Any=3 , _lowerCAmelCase : List[Any]=2 , _lowerCAmelCase : List[Any]=3 , _lowerCAmelCase : List[str]=None , **_lowerCAmelCase : List[str] , ):
'''simple docstring'''
        super().__init__(**kwargs , pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id)
__lowercase =hidden_size
__lowercase =feat_extract_norm
__lowercase =feat_extract_activation
__lowercase =list(_lowerCAmelCase)
__lowercase =list(_lowerCAmelCase)
__lowercase =list(_lowerCAmelCase)
__lowercase =conv_bias
__lowercase =num_buckets
__lowercase =max_bucket_distance
__lowercase =num_conv_pos_embeddings
__lowercase =num_conv_pos_embedding_groups
__lowercase =len(self.conv_dim)
__lowercase =num_hidden_layers
__lowercase =intermediate_size
__lowercase =hidden_act
__lowercase =num_attention_heads
__lowercase =hidden_dropout
__lowercase =attention_dropout
__lowercase =activation_dropout
__lowercase =feat_proj_dropout
__lowercase =final_dropout
__lowercase =layerdrop
__lowercase =layer_norm_eps
__lowercase =initializer_range
__lowercase =num_ctc_classes
__lowercase =vocab_size
__lowercase =do_stable_layer_norm
__lowercase =use_weighted_layer_sum
__lowercase =classifier_proj_size
if (
(len(self.conv_stride) != self.num_feat_extract_layers)
or (len(self.conv_kernel) != self.num_feat_extract_layers)
or (len(self.conv_dim) != self.num_feat_extract_layers)
):
raise ValueError(
'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='
' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='
f""" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"""
f""" `len(config.conv_kernel) = {len(self.conv_kernel)}`.""")
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
__lowercase =apply_spec_augment
__lowercase =mask_time_prob
__lowercase =mask_time_length
__lowercase =mask_time_min_masks
__lowercase =mask_feature_prob
__lowercase =mask_feature_length
# parameters for pretraining with codevector quantized representations
__lowercase =num_codevectors_per_group
__lowercase =num_codevector_groups
__lowercase =contrastive_logits_temperature
__lowercase =num_negatives
__lowercase =codevector_dim
__lowercase =proj_codevector_dim
__lowercase =diversity_loss_weight
# ctc loss
__lowercase =ctc_loss_reduction
__lowercase =ctc_zero_infinity
# adapter
__lowercase =add_adapter
__lowercase =adapter_kernel_size
__lowercase =adapter_stride
__lowercase =num_adapter_layers
__lowercase =output_hidden_size or hidden_size
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
__lowercase =classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
__lowercase =list(_lowerCAmelCase)
__lowercase =list(_lowerCAmelCase)
__lowercase =list(_lowerCAmelCase)
__lowercase =xvector_output_dim
@property
def __lowerCamelCase ( self : Optional[int]):
'''simple docstring'''
return functools.reduce(operator.mul , self.conv_stride , 1)
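The final property multiplies the feature-extractor strides together, which gives the model's input-to-frame downsampling factor. A standalone sketch of that arithmetic (illustrative, using the default WavLM strides from the signature above):

import functools
import operator

conv_stride = (5, 2, 2, 2, 2, 2, 2)  # default WavLM feature-extractor strides
ratio = functools.reduce(operator.mul, conv_stride, 1)
print(ratio)  # 320 -> one encoder frame per 320 raw samples, i.e. 20 ms at 16 kHz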
| 48 | 1 |
"""simple docstring"""
import sys
from collections import defaultdict
class Heap :
    '''simple docstring'''
    def __init__( self ):
        self.node_position = []
    def get_position( self , vertex ):
        return self.node_position[vertex]
    def set_position( self , vertex , pos ):
        self.node_position[vertex] = pos
    def top_to_bottom( self , heap , start , size , positions ):
        if start > size // 2 - 1:
            return
        else:
            if 2 * start + 2 >= size:
                smallest_child = 2 * start + 1
            else:
                if heap[2 * start + 1] < heap[2 * start + 2]:
                    smallest_child = 2 * start + 1
                else:
                    smallest_child = 2 * start + 2
            if heap[smallest_child] < heap[start]:
                temp , tempa = heap[smallest_child], positions[smallest_child]
                heap[smallest_child] , positions[smallest_child] = (
                    heap[start],
                    positions[start],
                )
                heap[start] , positions[start] = temp, tempa
                temp = self.get_position(positions[smallest_child] )
                self.set_position(
                    positions[smallest_child] , self.get_position(positions[start] ) )
                self.set_position(positions[start] , temp )
                self.top_to_bottom(heap , smallest_child , size , positions )
    def bottom_to_top( self , val , index , heap , position ):
        temp = position[index]
        while index != 0:
            parent = int((index - 2) / 2 ) if index % 2 == 0 else int((index - 1) / 2 )
            if val < heap[parent]:
                heap[index] = heap[parent]
                position[index] = position[parent]
                self.set_position(position[parent] , index )
            else:
                heap[index] = val
                position[index] = temp
                self.set_position(temp , index )
                break
            index = parent
        else:
            heap[0] = val
            position[0] = temp
            self.set_position(temp , 0 )
    def heapify( self , heap , positions ):
        start = len(heap ) // 2 - 1
        for i in range(start , -1 , -1 ):
            self.top_to_bottom(heap , i , len(heap ) , positions )
    def delete_minimum( self , heap , positions ):
        temp = positions[0]
        heap[0] = sys.maxsize
        self.top_to_bottom(heap , 0 , len(heap ) , positions )
        return temp
def prisms_algorithm( adjacency_list ):
    '''simple docstring'''
    heap = Heap()
    visited = [0] * len(adjacency_list )
    nbr_tv = [-1] * len(adjacency_list )  # Neighboring Tree Vertex of selected vertex
    # Minimum Distance of explored vertex with neighboring vertex of partial tree
    # formed in graph
    distance_tv = []  # Heap of Distance of vertices from their neighboring vertex
    positions = []
    for vertex in range(len(adjacency_list ) ):
        distance_tv.append(sys.maxsize )
        positions.append(vertex )
        heap.node_position.append(vertex )
    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv , positions )
    for _ in range(1 , len(adjacency_list ) ):
        vertex = heap.delete_minimum(distance_tv , positions )
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex) )
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor )]
                ):
                    distance_tv[heap.get_position(neighbor )] = distance
                    heap.bottom_to_top(
                        distance , heap.get_position(neighbor ) , distance_tv , positions )
                    nbr_tv[neighbor] = vertex
    return tree_edges
if __name__ == "__main__": # pragma: no cover
# < --------- Prims Algorithm --------- >
    edges_number = int(input("Enter number of edges: ").strip())
    adjacency_list = defaultdict(list)
    for _ in range(edges_number):
        edge = [int(x) for x in input().strip().split()]
adjacency_list[edge[0]].append([edge[1], edge[2]])
adjacency_list[edge[1]].append([edge[0], edge[2]])
print(prisms_algorithm(adjacency_list))
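For a non-interactive run, the adjacency list can be built directly. The sketch below is illustrative, not part of the original script; it feeds a small weighted graph to the function defined above:

from collections import defaultdict

graph = defaultdict(list)
for u, v, w in [(0, 1, 1), (0, 2, 4), (1, 2, 2), (1, 3, 6), (2, 3, 3)]:
    graph[u].append([v, w])
    graph[v].append([u, w])
print(prisms_algorithm(graph))  # e.g. [(0, 1), (1, 2), (2, 3)]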
| 57 |
"""simple docstring"""
import unittest
from datasets import load_dataset
from transformers.pipelines import pipeline
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow
@is_pipeline_test
@require_torch
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@require_torch
def __lowerCAmelCase ( self ) -> Any:
_UpperCAmelCase : str = pipeline(
task='''zero-shot-audio-classification''' , model='''hf-internal-testing/tiny-clap-htsat-unfused''' )
_UpperCAmelCase : List[Any] = load_dataset('''ashraq/esc50''' )
_UpperCAmelCase : Optional[int] = dataset['''train''']['''audio'''][-1]['''array''']
_UpperCAmelCase : str = audio_classifier(A , candidate_labels=['''Sound of a dog''', '''Sound of vaccum cleaner'''] )
self.assertEqual(
nested_simplify(A ) , [{'''score''': 0.501, '''label''': '''Sound of a dog'''}, {'''score''': 0.499, '''label''': '''Sound of vaccum cleaner'''}] , )
@unittest.skip('''No models are available in TF''' )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
pass
@slow
@require_torch
def __lowerCAmelCase ( self ) -> str:
_UpperCAmelCase : Union[str, Any] = pipeline(
task='''zero-shot-audio-classification''' , model='''laion/clap-htsat-unfused''' , )
# This is an audio of a dog
_UpperCAmelCase : List[Any] = load_dataset('''ashraq/esc50''' )
_UpperCAmelCase : Optional[int] = dataset['''train''']['''audio'''][-1]['''array''']
_UpperCAmelCase : Any = audio_classifier(A , candidate_labels=['''Sound of a dog''', '''Sound of vaccum cleaner'''] )
self.assertEqual(
nested_simplify(A ) , [
{'''score''': 0.999, '''label''': '''Sound of a dog'''},
{'''score''': 0.001, '''label''': '''Sound of vaccum cleaner'''},
] , )
_UpperCAmelCase : List[Any] = audio_classifier([audio] * 5 , candidate_labels=['''Sound of a dog''', '''Sound of vaccum cleaner'''] )
self.assertEqual(
nested_simplify(A ) , [
[
{'''score''': 0.999, '''label''': '''Sound of a dog'''},
{'''score''': 0.001, '''label''': '''Sound of vaccum cleaner'''},
],
]
* 5 , )
_UpperCAmelCase : Tuple = audio_classifier(
[audio] * 5 , candidate_labels=['''Sound of a dog''', '''Sound of vaccum cleaner'''] , batch_size=5 )
self.assertEqual(
nested_simplify(A ) , [
[
{'''score''': 0.999, '''label''': '''Sound of a dog'''},
{'''score''': 0.001, '''label''': '''Sound of vaccum cleaner'''},
],
]
* 5 , )
@unittest.skip('''No models are available in TF''' )
def __lowerCAmelCase ( self ) -> int:
pass
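Outside the test harness, the same pipeline is driven in a few lines. A hedged sketch follows; it assumes network access to download the checkpoint used in the slow test, and `audio` stands in for a 1-D numpy array of raw samples:

from transformers import pipeline

classifier = pipeline(task="zero-shot-audio-classification", model="laion/clap-htsat-unfused")
# audio = ...  # 1-D numpy array, e.g. one clip from the ashraq/esc50 dataset
# print(classifier(audio, candidate_labels=["Sound of a dog", "Sound of rain"]))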
| 263 | 0 |
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class SCREAMING_SNAKE_CASE__ :
"""simple docstring"""
def __init__( self : str , __A : str , __A : int=None , __A : List[Any]=None , __A : Any=None , __A : Union[str, Any]="resnet50" , __A : Union[str, Any]=3 , __A : List[str]=3_2 , __A : str=3 , __A : Tuple=True , __A : List[str]=True , ):
snake_case__ : Optional[int] = parent
snake_case__ : Optional[Any] = out_indices if out_indices is not None else [4]
snake_case__ : Dict = stage_names
snake_case__ : str = out_features
snake_case__ : List[Any] = backbone
snake_case__ : Dict = batch_size
snake_case__ : Tuple = image_size
snake_case__ : int = num_channels
snake_case__ : Union[str, Any] = use_pretrained_backbone
snake_case__ : int = is_training
def _lowercase ( self : int ):
snake_case__ : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
snake_case__ : Optional[int] = self.get_config()
return config, pixel_values
def _lowercase ( self : Union[str, Any] ):
return TimmBackboneConfig(
image_size=self.image_size , num_channels=self.num_channels , out_features=self.out_features , out_indices=self.out_indices , stage_names=self.stage_names , use_pretrained_backbone=self.use_pretrained_backbone , backbone=self.backbone , )
def _lowercase ( self : int , __A : Optional[Any] , __A : int ):
snake_case__ : Any = TimmBackbone(config=__A )
model.to(__A )
model.eval()
with torch.no_grad():
snake_case__ : Optional[int] = model(__A )
self.parent.assertEqual(
            result.feature_maps[-1].shape , (self.batch_size, model.channels[-1], 1_4, 1_4) , )
def _lowercase ( self : str ):
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
@require_timm
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ):
"""simple docstring"""
a_ = (TimmBackbone,) if is_torch_available() else ()
a_ = {"feature-extraction": TimmBackbone} if is_torch_available() else {}
a_ = False
a_ = False
a_ = False
a_ = False
def _lowercase ( self : List[Any] ):
snake_case__ : List[Any] = TimmBackboneModelTester(self )
snake_case__ : Tuple = ConfigTester(self , config_class=__A , has_text_modality=__A )
def _lowercase ( self : List[Any] ):
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _lowercase ( self : str ):
snake_case__ : str = "resnet18"
snake_case__ : Any = "microsoft/resnet-18"
snake_case__ : Union[str, Any] = AutoBackbone.from_pretrained(__A , use_timm_backbone=__A )
snake_case__ : Optional[Any] = AutoBackbone.from_pretrained(__A )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(len(timm_model.stage_names ) , len(transformers_model.stage_names ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
# Out indices are set to the last layer by default. For timm models, we don't know
# the number of layers in advance, so we set it to (-1,), whereas for transformers
# models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
self.assertEqual(timm_model.out_indices , (-1,) )
self.assertEqual(transformers_model.out_indices , [len(timm_model.stage_names ) - 1] )
snake_case__ : List[Any] = AutoBackbone.from_pretrained(__A , use_timm_backbone=__A , out_indices=[1, 2, 3] )
snake_case__ : str = AutoBackbone.from_pretrained(__A , out_indices=[1, 2, 3] )
self.assertEqual(timm_model.out_indices , transformers_model.out_indices )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
@unittest.skip("TimmBackbone doesn't support feed forward chunking" )
def _lowercase ( self : int ):
pass
@unittest.skip("TimmBackbone doesn't have num_hidden_layers attribute" )
def _lowercase ( self : int ):
pass
@unittest.skip("TimmBackbone initialization is managed on the timm side" )
def _lowercase ( self : int ):
pass
@unittest.skip("TimmBackbone models doesn't have inputs_embeds" )
def _lowercase ( self : int ):
pass
@unittest.skip("TimmBackbone models doesn't have inputs_embeds" )
def _lowercase ( self : Union[str, Any] ):
pass
@unittest.skip("TimmBackbone model cannot be created without specifying a backbone checkpoint" )
def _lowercase ( self : List[Any] ):
pass
@unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone" )
def _lowercase ( self : List[str] ):
pass
@unittest.skip("model weights aren't tied in TimmBackbone." )
def _lowercase ( self : str ):
pass
@unittest.skip("model weights aren't tied in TimmBackbone." )
def _lowercase ( self : Tuple ):
pass
@unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone" )
def _lowercase ( self : List[Any] ):
pass
@unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone" )
def _lowercase ( self : List[Any] ):
pass
@unittest.skip("TimmBackbone doesn't have hidden size info in its configuration." )
def _lowercase ( self : int ):
pass
@unittest.skip("TimmBackbone doesn't support output_attentions." )
def _lowercase ( self : List[Any] ):
pass
@unittest.skip("Safetensors is not supported by timm." )
def _lowercase ( self : int ):
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def _lowercase ( self : Dict ):
pass
def _lowercase ( self : str ):
snake_case__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case__ : int = model_class(__A )
snake_case__ : Optional[int] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case__ : Any = [*signature.parameters.keys()]
snake_case__ : str = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __A )
def _lowercase ( self : Union[str, Any] ):
snake_case__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
snake_case__ : List[str] = True
snake_case__ : Optional[Any] = self.has_attentions
# no need to test all models as different heads yield the same functionality
snake_case__ : Tuple = self.all_model_classes[0]
snake_case__ : Any = model_class(__A )
model.to(__A )
snake_case__ : List[str] = self._prepare_for_class(__A , __A )
snake_case__ : Union[str, Any] = model(**__A )
snake_case__ : int = outputs[0][-1]
# Encoder-/Decoder-only models
snake_case__ : List[Any] = outputs.hidden_states[0]
hidden_states.retain_grad()
if self.has_attentions:
snake_case__ : Tuple = outputs.attentions[0]
attentions.retain_grad()
output.flatten()[0].backward(retain_graph=__A )
self.assertIsNotNone(hidden_states.grad )
if self.has_attentions:
self.assertIsNotNone(attentions.grad )
def _lowercase ( self : Tuple ):
snake_case__ : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case__ : Dict = model_class(__A )
model.to(__A )
model.eval()
snake_case__ : Any = model(**__A )
self.assertEqual(len(result.feature_maps ) , len(config.out_indices ) )
self.assertEqual(len(model.channels ) , len(config.out_indices ) )
# Check output of last stage is taken if out_features=None, out_indices=None
snake_case__ : Optional[int] = copy.deepcopy(__A )
snake_case__ : Optional[Any] = None
snake_case__ : int = model_class(__A )
model.to(__A )
model.eval()
snake_case__ : Union[str, Any] = model(**__A )
self.assertEqual(len(result.feature_maps ) , 1 )
self.assertEqual(len(model.channels ) , 1 )
# Check backbone can be initialized with fresh weights
snake_case__ : Tuple = copy.deepcopy(__A )
snake_case__ : List[Any] = False
snake_case__ : str = model_class(__A )
model.to(__A )
model.eval()
snake_case__ : Dict = model(**__A )
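A minimal usage sketch of the backbone API exercised above (illustrative; it assumes torch, timm and network access to the `microsoft/resnet-18` checkpoint named in the equivalence test):

import torch
from transformers import AutoBackbone

backbone = AutoBackbone.from_pretrained("microsoft/resnet-18", out_indices=[1, 2, 3])
features = backbone(torch.randn(1, 3, 224, 224)).feature_maps
print([tuple(f.shape) for f in features])  # one feature map per requested stage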
| 350 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionInstructPixaPixPipeline,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.utils import floats_tensor, load_image, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ):
"""simple docstring"""
a_ = StableDiffusionInstructPixaPixPipeline
a_ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width", "cross_attention_kwargs"}
a_ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
a_ = IMAGE_TO_IMAGE_IMAGE_PARAMS
a_ = IMAGE_TO_IMAGE_IMAGE_PARAMS
def _lowercase ( self : List[str] ):
torch.manual_seed(0 )
snake_case__ : Any = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=8 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=3_2 , )
snake_case__ : int = PNDMScheduler(skip_prk_steps=__A )
torch.manual_seed(0 )
snake_case__ : Union[str, Any] = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
torch.manual_seed(0 )
snake_case__ : int = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
snake_case__ : Union[str, Any] = CLIPTextModel(__A )
snake_case__ : List[Any] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
snake_case__ : str = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"safety_checker": None,
"feature_extractor": None,
}
return components
def _lowercase ( self : List[Any] , __A : int , __A : Any=0 ):
snake_case__ : Optional[int] = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(__A ) ).to(__A )
snake_case__ : int = image.cpu().permute(0 , 2 , 3 , 1 )[0]
snake_case__ : Union[str, Any] = Image.fromarray(np.uinta(__A ) ).convert("RGB" )
if str(__A ).startswith("mps" ):
snake_case__ : List[Any] = torch.manual_seed(__A )
else:
snake_case__ : Optional[int] = torch.Generator(device=__A ).manual_seed(__A )
snake_case__ : Optional[int] = {
"prompt": "A painting of a squirrel eating a burger",
"image": image,
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
"image_guidance_scale": 1,
"output_type": "numpy",
}
return inputs
def _lowercase ( self : int ):
snake_case__ : Tuple = "cpu" # ensure determinism for the device-dependent torch.Generator
snake_case__ : int = self.get_dummy_components()
snake_case__ : List[Any] = StableDiffusionInstructPixaPixPipeline(**__A )
snake_case__ : List[Any] = sd_pipe.to(__A )
sd_pipe.set_progress_bar_config(disable=__A )
snake_case__ : Tuple = self.get_dummy_inputs(__A )
snake_case__ : List[str] = sd_pipe(**__A ).images
snake_case__ : str = image[0, -3:, -3:, -1]
assert image.shape == (1, 3_2, 3_2, 3)
snake_case__ : List[Any] = np.array([0.7_5_2_6, 0.3_7_5_0, 0.4_5_4_7, 0.6_1_1_7, 0.5_8_6_6, 0.5_0_1_6, 0.4_3_2_7, 0.5_6_4_2, 0.4_8_1_5] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def _lowercase ( self : Union[str, Any] ):
snake_case__ : Dict = "cpu" # ensure determinism for the device-dependent torch.Generator
snake_case__ : List[Any] = self.get_dummy_components()
snake_case__ : List[Any] = StableDiffusionInstructPixaPixPipeline(**__A )
snake_case__ : str = sd_pipe.to(__A )
sd_pipe.set_progress_bar_config(disable=__A )
snake_case__ : str = self.get_dummy_inputs(__A )
snake_case__ : List[Any] = "french fries"
snake_case__ : str = sd_pipe(**__A , negative_prompt=__A )
snake_case__ : Any = output.images
snake_case__ : Any = image[0, -3:, -3:, -1]
assert image.shape == (1, 3_2, 3_2, 3)
snake_case__ : Union[str, Any] = np.array([0.7_5_1_1, 0.3_6_4_2, 0.4_5_5_3, 0.6_2_3_6, 0.5_7_9_7, 0.5_0_1_3, 0.4_3_4_3, 0.5_6_1_1, 0.4_8_3_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def _lowercase ( self : Optional[int] ):
snake_case__ : Tuple = "cpu" # ensure determinism for the device-dependent torch.Generator
snake_case__ : List[Any] = self.get_dummy_components()
snake_case__ : str = StableDiffusionInstructPixaPixPipeline(**__A )
snake_case__ : List[str] = sd_pipe.to(__A )
sd_pipe.set_progress_bar_config(disable=__A )
snake_case__ : Any = self.get_dummy_inputs(__A )
snake_case__ : Tuple = [inputs["prompt"]] * 2
snake_case__ : Any = np.array(inputs["image"] ).astype(np.floataa ) / 2_5_5.0
snake_case__ : List[str] = torch.from_numpy(__A ).unsqueeze(0 ).to(__A )
snake_case__ : Union[str, Any] = image / 2 + 0.5
snake_case__ : str = image.permute(0 , 3 , 1 , 2 )
snake_case__ : int = image.repeat(2 , 1 , 1 , 1 )
snake_case__ : str = sd_pipe(**__A ).images
snake_case__ : Any = image[-1, -3:, -3:, -1]
assert image.shape == (2, 3_2, 3_2, 3)
snake_case__ : int = np.array([0.5_8_1_2, 0.5_7_4_8, 0.5_2_2_2, 0.5_9_0_8, 0.5_6_9_5, 0.7_1_7_4, 0.6_8_0_4, 0.5_5_2_3, 0.5_5_7_9] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def _lowercase ( self : Union[str, Any] ):
snake_case__ : Union[str, Any] = "cpu" # ensure determinism for the device-dependent torch.Generator
snake_case__ : int = self.get_dummy_components()
snake_case__ : Dict = EulerAncestralDiscreteScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="scaled_linear" )
snake_case__ : Tuple = StableDiffusionInstructPixaPixPipeline(**__A )
snake_case__ : str = sd_pipe.to(__A )
sd_pipe.set_progress_bar_config(disable=__A )
snake_case__ : str = self.get_dummy_inputs(__A )
snake_case__ : Optional[Any] = sd_pipe(**__A ).images
snake_case__ : Dict = image[0, -3:, -3:, -1]
        snake_case__ : Union[str, Any] = [round(x , 4 ) for x in image_slice.flatten().tolist()]
        print(",".join([str(x ) for x in snake_case__] ) )
assert image.shape == (1, 3_2, 3_2, 3)
snake_case__ : str = np.array([0.7_4_1_7, 0.3_8_4_2, 0.4_7_3_2, 0.5_7_7_6, 0.5_8_9_1, 0.5_1_3_9, 0.4_0_5_2, 0.5_6_7_3, 0.4_9_8_6] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def _lowercase ( self : List[str] ):
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
def _lowercase ( self : List[Any] ):
snake_case__ : Tuple = self.get_dummy_components()
snake_case__ : Tuple = StableDiffusionInstructPixaPixPipeline(**__A )
snake_case__ : int = VaeImageProcessor(do_resize=__A , do_normalize=__A )
snake_case__ : Any = pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
snake_case__ : Dict = pipe(**self.get_dummy_inputs_by_type(__A , input_image_type="pt" ) )[0]
snake_case__ : int = components["vae"]
snake_case__ : Union[str, Any] = self.get_dummy_inputs_by_type(__A , input_image_type="pt" )
for image_param in self.image_latents_params:
if image_param in inputs.keys():
snake_case__ : Optional[int] = vae.encode(inputs[image_param] ).latent_dist.mode()
snake_case__ : str = pipe(**__A )[0]
snake_case__ : Dict = np.abs(out - out_latents_inputs ).max()
self.assertLess(__A , 1e-4 , "passing latents as image input generate different result from passing image" )
@slow
@require_torch_gpu
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
def _lowercase ( self : Optional[int] ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowercase ( self : str , __A : Dict=0 ):
snake_case__ : Optional[int] = torch.manual_seed(__A )
snake_case__ : Tuple = load_image(
"https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg" )
snake_case__ : Optional[Any] = {
"prompt": "turn him into a cyborg",
"image": image,
"generator": generator,
"num_inference_steps": 3,
"guidance_scale": 7.5,
"image_guidance_scale": 1.0,
"output_type": "numpy",
}
return inputs
def _lowercase ( self : int ):
snake_case__ : Union[str, Any] = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"timbrooks/instruct-pix2pix" , safety_checker=__A )
pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
pipe.enable_attention_slicing()
snake_case__ : Union[str, Any] = self.get_inputs()
snake_case__ : Union[str, Any] = pipe(**__A ).images
snake_case__ : Dict = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_1_2, 5_1_2, 3)
snake_case__ : Any = np.array([0.5_9_0_2, 0.6_0_1_5, 0.6_0_2_7, 0.5_9_8_3, 0.6_0_9_2, 0.6_0_6_1, 0.5_7_6_5, 0.5_7_8_5, 0.5_5_5_5] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def _lowercase ( self : str ):
snake_case__ : int = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"timbrooks/instruct-pix2pix" , safety_checker=__A )
snake_case__ : Dict = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
pipe.enable_attention_slicing()
snake_case__ : List[str] = self.get_inputs()
snake_case__ : Any = pipe(**__A ).images
snake_case__ : Union[str, Any] = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_1_2, 5_1_2, 3)
snake_case__ : Optional[Any] = np.array([0.6_5_7_8, 0.6_8_1_7, 0.6_9_7_2, 0.6_7_6_1, 0.6_8_5_6, 0.6_9_1_6, 0.6_4_2_8, 0.6_5_1_6, 0.6_3_0_1] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def _lowercase ( self : Dict ):
snake_case__ : List[str] = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"timbrooks/instruct-pix2pix" , safety_checker=__A )
snake_case__ : List[str] = DDIMScheduler.from_config(pipe.scheduler.config )
pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
pipe.enable_attention_slicing()
snake_case__ : int = self.get_inputs()
snake_case__ : Union[str, Any] = pipe(**__A ).images
snake_case__ : Tuple = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_1_2, 5_1_2, 3)
snake_case__ : Union[str, Any] = np.array([0.3_8_2_8, 0.3_8_3_4, 0.3_8_1_8, 0.3_7_9_2, 0.3_8_6_5, 0.3_7_5_2, 0.3_7_9_2, 0.3_8_4_7, 0.3_7_5_3] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def _lowercase ( self : List[Any] ):
snake_case__ : Optional[Any] = 0
def callback_fn(__A : int , __A : int , __A : torch.FloatTensor ) -> None:
snake_case__ : Union[str, Any] = True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
snake_case__ : Optional[Any] = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 6_4, 6_4)
snake_case__ : int = latents[0, -3:, -3:, -1]
snake_case__ : Optional[int] = np.array([-0.2_4_6_3, -0.4_6_4_4, -0.9_7_5_6, 1.5_1_7_6, 1.4_4_1_4, 0.7_8_6_6, 0.9_8_9_7, 0.8_5_2_1, 0.7_9_8_3] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
elif step == 2:
snake_case__ : int = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 6_4, 6_4)
snake_case__ : Any = latents[0, -3:, -3:, -1]
snake_case__ : Dict = np.array([-0.2_6_4_4, -0.4_6_2_6, -0.9_6_5_3, 1.5_1_7_6, 1.4_5_5_1, 0.7_6_8_6, 0.9_8_0_5, 0.8_4_5_2, 0.8_1_1_5] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
snake_case__ : Any = False
snake_case__ : Union[str, Any] = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"timbrooks/instruct-pix2pix" , safety_checker=__A , torch_dtype=torch.floataa )
snake_case__ : int = pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
pipe.enable_attention_slicing()
snake_case__ : Optional[Any] = self.get_inputs()
pipe(**__A , callback=__A , callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
def _lowercase ( self : List[Any] ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
snake_case__ : Dict = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"timbrooks/instruct-pix2pix" , safety_checker=__A , torch_dtype=torch.floataa )
snake_case__ : Tuple = pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
snake_case__ : Dict = self.get_inputs()
snake_case__ : List[Any] = pipe(**__A )
snake_case__ : List[str] = torch.cuda.max_memory_allocated()
# make sure that less than 2.2 GB is allocated
assert mem_bytes < 2.2 * 1_0**9
def _lowercase ( self : Tuple ):
snake_case__ : int = self.get_inputs()
# resize to resolution that is divisible by 8 but not 16 or 32
snake_case__ : Union[str, Any] = inputs["image"].resize((5_0_4, 5_0_4) )
snake_case__ : Optional[Any] = "timbrooks/instruct-pix2pix"
snake_case__ : Union[str, Any] = StableDiffusionInstructPixaPixPipeline.from_pretrained(
__A , safety_checker=__A , )
pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
pipe.enable_attention_slicing()
snake_case__ : Union[str, Any] = pipe(**__A )
snake_case__ : Tuple = output.images[0]
snake_case__ : List[Any] = image[2_5_5:2_5_8, 3_8_3:3_8_6, -1]
assert image.shape == (5_0_4, 5_0_4, 3)
snake_case__ : int = np.array([0.2_7_2_6, 0.2_5_2_9, 0.2_6_6_4, 0.2_6_5_5, 0.2_6_4_1, 0.2_6_4_2, 0.2_5_9_1, 0.2_6_4_9, 0.2_5_9_0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-3
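The slow tests above reduce to the following usage pattern. An illustrative sketch, assuming a CUDA GPU and network access; the prompt and image URL are the ones used in `get_inputs`:

import torch
from diffusers import StableDiffusionInstructPix2PixPipeline
from diffusers.utils import load_image

pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
    "timbrooks/instruct-pix2pix", torch_dtype=torch.float16
).to("cuda")
image = load_image(
    "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg"
)
edited = pipe("turn him into a cyborg", image=image, num_inference_steps=20, image_guidance_scale=1.0).images[0]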
| 286 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case_ : Any = logging.get_logger(__name__)
snake_case_ : List[str] = {
"""edbeeching/decision-transformer-gym-hopper-medium""": (
"""https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json"""
),
# See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}
class __a (lowerCamelCase ):
__a : int = """decision_transformer"""
__a : int = ["""past_key_values"""]
__a : Optional[Any] = {
"""max_position_embeddings""": """n_positions""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
def __init__( self : str , __magic_name__ : Optional[Any]=17 , __magic_name__ : List[Any]=4 , __magic_name__ : Optional[Any]=1_28 , __magic_name__ : List[Any]=40_96 , __magic_name__ : Dict=True , __magic_name__ : str=1 , __magic_name__ : Any=10_24 , __magic_name__ : Tuple=3 , __magic_name__ : Optional[Any]=1 , __magic_name__ : str=None , __magic_name__ : List[str]="relu" , __magic_name__ : Optional[int]=0.1 , __magic_name__ : List[str]=0.1 , __magic_name__ : List[str]=0.1 , __magic_name__ : Optional[Any]=1E-5 , __magic_name__ : List[Any]=0.0_2 , __magic_name__ : Optional[int]=True , __magic_name__ : Optional[Any]=True , __magic_name__ : Optional[Any]=5_02_56 , __magic_name__ : Dict=5_02_56 , __magic_name__ : Tuple=False , __magic_name__ : Dict=False , **__magic_name__ : Optional[int] , ) -> Dict:
"""simple docstring"""
UpperCAmelCase_ : Dict = state_dim
UpperCAmelCase_ : str = act_dim
UpperCAmelCase_ : Optional[int] = hidden_size
UpperCAmelCase_ : List[Any] = max_ep_len
UpperCAmelCase_ : Optional[Any] = action_tanh
UpperCAmelCase_ : int = vocab_size
UpperCAmelCase_ : Optional[int] = n_positions
UpperCAmelCase_ : Optional[Any] = n_layer
UpperCAmelCase_ : str = n_head
UpperCAmelCase_ : List[str] = n_inner
UpperCAmelCase_ : Dict = activation_function
UpperCAmelCase_ : Dict = resid_pdrop
UpperCAmelCase_ : str = embd_pdrop
UpperCAmelCase_ : Union[str, Any] = attn_pdrop
UpperCAmelCase_ : List[str] = layer_norm_epsilon
UpperCAmelCase_ : Tuple = initializer_range
UpperCAmelCase_ : List[Any] = scale_attn_weights
UpperCAmelCase_ : Optional[int] = use_cache
UpperCAmelCase_ : Any = scale_attn_by_inverse_layer_idx
UpperCAmelCase_ : Optional[int] = reorder_and_upcast_attn
UpperCAmelCase_ : Dict = bos_token_id
UpperCAmelCase_ : List[Any] = eos_token_id
        super().__init__(bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
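An illustrative instantiation of this config (assumes transformers is installed; the state and action dimensions match the hopper-medium defaults in the signature above):

from transformers import DecisionTransformerConfig

config = DecisionTransformerConfig(state_dim=17, act_dim=4, max_ep_len=4096)
print(config.hidden_size)  # 128 by default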
| 125 |
import argparse
import hashlib # hashlib is only used inside the Test class
import struct
class SHAaHash :
    """simple docstring"""
    def __init__( self , data ) ->None:
        """simple docstring"""
        self.data = data
        self.h = [0X67_45_23_01, 0XEF_CD_AB_89, 0X98_BA_DC_FE, 0X10_32_54_76, 0XC3_D2_E1_F0]
    @staticmethod
    def rotate( n , b ) ->int:
        """simple docstring"""
        return ((n << b) | (n >> (3_2 - b))) & 0XFF_FF_FF_FF
    def padding( self ) ->bytes:
        """simple docstring"""
        padding = B"""\x80""" + B"""\x00""" * (6_3 - (len(self.data ) + 8) % 6_4)
        padded_data = self.data + padding + struct.pack(""">Q""" , 8 * len(self.data ) )
        return padded_data
    def split_blocks( self ) ->list:
        """simple docstring"""
        return [
            self.padded_data[i : i + 6_4] for i in range(0 , len(self.padded_data ) , 6_4 )
        ]
    def expand_block( self , block ) ->list:
        """simple docstring"""
        w = list(struct.unpack(""">16L""" , block ) ) + [0] * 6_4
        for i in range(1_6 , 8_0 ):
            w[i] = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 1_4] ^ w[i - 1_6]) , 1 )
        return w
    def final_hash( self ) ->str:
        """simple docstring"""
        self.padded_data = self.padding()
        self.blocks = self.split_blocks()
        for block in self.blocks:
            expanded_block = self.expand_block(block )
            a , b , c , d , e = self.h
            for i in range(0 , 8_0 ):
                if 0 <= i < 2_0:
                    f = (b & c) | ((~b) & d)
                    k = 0X5A_82_79_99
                elif 2_0 <= i < 4_0:
                    f = b ^ c ^ d
                    k = 0X6E_D9_EB_A1
                elif 4_0 <= i < 6_0:
                    f = (b & c) | (b & d) | (c & d)
                    k = 0X8F_1B_BC_DC
                elif 6_0 <= i < 8_0:
                    f = b ^ c ^ d
                    k = 0XCA_62_C1_D6
                a , b , c , d , e = (
                    self.rotate(a , 5 ) + f + e + k + expanded_block[i] & 0XFF_FF_FF_FF,
                    a,
                    self.rotate(b , 3_0 ),
                    c,
                    d,
                )
            self.h = (
                self.h[0] + a & 0XFF_FF_FF_FF,
                self.h[1] + b & 0XFF_FF_FF_FF,
                self.h[2] + c & 0XFF_FF_FF_FF,
                self.h[3] + d & 0XFF_FF_FF_FF,
                self.h[4] + e & 0XFF_FF_FF_FF,
            )
        return ("{:08x}" * 5).format(*self.h )
def test_sha1_hash() -> None:
    data = B"""Test String"""
    assert SHAaHash(data ).final_hash() == hashlib.sha1(data ).hexdigest()  # noqa: S324
def main() -> None:
    parser = argparse.ArgumentParser(description="""Process some strings or files""" )
    parser.add_argument(
        """--string""" , dest="""input_string""" , default="""Hello World!! Welcome to Cryptography""" , help="""Hash the string""" , )
    parser.add_argument("""--file""" , dest="""input_file""" , help="""Hash contents of a file""" )
    args = parser.parse_args()
    hash_input = args.input_string
    # In any case hash input should be a bytestring
    if args.input_file:
        with open(args.input_file , """rb""" ) as f:
            hash_input = f.read()
    else:
        hash_input = bytes(hash_input , """utf-8""" )
    print(SHAaHash(hash_input ).final_hash() )
if __name__ == "__main__":
    main()
import doctest
doctest.testmod()
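Two quick invariants worth checking against the class above (an illustrative sketch, not part of the original script): the padded message is always a whole number of 64-byte blocks, and the digest agrees with hashlib.

msg = B"abc"
padded = SHAaHash(msg).padding()
assert len(padded) % 6_4 == 0
assert SHAaHash(msg).final_hash() == hashlib.sha1(msg).hexdigest()  # noqa: S324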
| 245 | 0 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
_SCREAMING_SNAKE_CASE : Tuple = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE : List[str] = "▁"
_SCREAMING_SNAKE_CASE : Tuple = {"vocab_file": "sentencepiece.bpe.model"}
_SCREAMING_SNAKE_CASE : List[Any] = {
"vocab_file": {
"facebook/mbart-large-50-one-to-many-mmt": (
"https://huggingface.co/facebook/mbart-large-50-one-to-many-mmt/resolve/main/sentencepiece.bpe.model"
),
}
}
_SCREAMING_SNAKE_CASE : List[str] = {
"facebook/mbart-large-50-one-to-many-mmt": 1024,
}
# fmt: off
_SCREAMING_SNAKE_CASE : Union[str, Any] = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN", "af_ZA", "az_AZ", "bn_IN", "fa_IR", "he_IL", "hr_HR", "id_ID", "ka_GE", "km_KH", "mk_MK", "ml_IN", "mn_MN", "mr_IN", "pl_PL", "ps_AF", "pt_XX", "sv_SE", "sw_KE", "ta_IN", "te_IN", "th_TH", "tl_XX", "uk_UA", "ur_PK", "xh_ZA", "gl_ES", "sl_SI"]
class _snake_case ( lowercase_ ):
lowerCAmelCase_ : int = VOCAB_FILES_NAMES
lowerCAmelCase_ : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ : str = ["input_ids", "attention_mask"]
lowerCAmelCase_ : List[int] = []
lowerCAmelCase_ : List[int] = []
def __init__( self , a__ , a__=None , a__=None , a__="</s>" , a__="</s>" , a__="<s>" , a__="<unk>" , a__="<pad>" , a__="<mask>" , a__ = None , **a__ , ) -> None:
'''simple docstring'''
snake_case_ = AddedToken(a__ , lstrip=a__ , rstrip=a__ ) if isinstance(a__ , a__ ) else mask_token
snake_case_ = {} if sp_model_kwargs is None else sp_model_kwargs
snake_case_ = kwargs.get("additional_special_tokens" , [] )
kwargs["additional_special_tokens"] += [
code for code in FAIRSEQ_LANGUAGE_CODES if code not in kwargs["additional_special_tokens"]
]
super().__init__(
src_lang=a__ , tgt_lang=a__ , eos_token=a__ , unk_token=a__ , sep_token=a__ , cls_token=a__ , pad_token=a__ , mask_token=a__ , sp_model_kwargs=self.sp_model_kwargs , **a__ , )
snake_case_ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(a__ ) )
snake_case_ = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
        # Mimic fairseq token-to-id alignment for the first 4 tokens
snake_case_ = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
snake_case_ = 1
snake_case_ = len(self.sp_model )
snake_case_ = {
code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(a__ )
}
snake_case_ = {v: k for k, v in self.lang_code_to_id.items()}
snake_case_ = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
snake_case_ = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
snake_case_ = src_lang if src_lang is not None else "en_XX"
snake_case_ = self.lang_code_to_id[self._src_lang]
snake_case_ = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def lowerCAmelCase__ ( self ) -> int:
'''simple docstring'''
return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token
@property
def lowerCAmelCase__ ( self ) -> str:
'''simple docstring'''
return self._src_lang
@src_lang.setter
def lowerCAmelCase__ ( self , a__ ) -> None:
'''simple docstring'''
snake_case_ = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def __getstate__( self ) -> Dict:
'''simple docstring'''
snake_case_ = self.__dict__.copy()
snake_case_ = None
return state
def __setstate__( self , a__ ) -> None:
'''simple docstring'''
snake_case_ = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
snake_case_ = {}
snake_case_ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def lowerCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
snake_case_ = {self.convert_ids_to_tokens(a__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def lowerCAmelCase__ ( self , a__ ) -> List[str]:
'''simple docstring'''
return self.sp_model.encode(a__ , out_type=a__ )
def lowerCAmelCase__ ( self , a__ ) -> int:
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
snake_case_ = self.sp_model.PieceToId(a__ )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def lowerCAmelCase__ ( self , a__ ) -> str:
'''simple docstring'''
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def lowerCAmelCase__ ( self , a__ ) -> str:
'''simple docstring'''
snake_case_ = []
snake_case_ = ""
snake_case_ = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(a__ ) + token
snake_case_ = True
snake_case_ = []
else:
current_sub_tokens.append(a__ )
snake_case_ = False
out_string += self.sp_model.decode(a__ )
return out_string.strip()
def lowerCAmelCase__ ( self , a__ , a__ = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(a__ ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
snake_case_ = os.path.join(
a__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(a__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , a__ )
elif not os.path.isfile(self.vocab_file ):
with open(a__ , "wb" ) as fi:
snake_case_ = self.sp_model.serialized_model_proto()
fi.write(a__ )
return (out_vocab_file,)
def lowerCAmelCase__ ( self , a__ , a__ = None , a__ = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=a__ , token_ids_a=a__ , already_has_special_tokens=a__ )
snake_case_ = [1] * len(self.prefix_tokens )
snake_case_ = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(a__ )) + suffix_ones
return prefix_ones + ([0] * len(a__ )) + ([0] * len(a__ )) + suffix_ones
def lowerCAmelCase__ ( self , a__ , a__ = None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def lowerCAmelCase__ ( self , a__ , a__ , a__ , a__ , **a__ ) -> Union[str, Any]:
'''simple docstring'''
if src_lang is None or tgt_lang is None:
raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model" )
snake_case_ = src_lang
snake_case_ = self(a__ , add_special_tokens=a__ , return_tensors=a__ , **a__ )
snake_case_ = self.convert_tokens_to_ids(a__ )
snake_case_ = tgt_lang_id
return inputs
def lowerCAmelCase__ ( self , a__ , a__ = "en_XX" , a__ = None , a__ = "ro_RO" , **a__ , ) -> BatchEncoding:
'''simple docstring'''
snake_case_ = src_lang
snake_case_ = tgt_lang
return super().prepare_seqaseq_batch(a__ , a__ , **a__ )
def lowerCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
return self.set_src_lang_special_tokens(self.src_lang )
def lowerCAmelCase__ ( self ) -> str:
'''simple docstring'''
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def lowerCAmelCase__ ( self , a__ ) -> None:
'''simple docstring'''
snake_case_ = self.lang_code_to_id[src_lang]
snake_case_ = [self.cur_lang_code_id]
snake_case_ = [self.eos_token_id]
def lowerCAmelCase__ ( self , a__ ) -> None:
'''simple docstring'''
snake_case_ = self.lang_code_to_id[tgt_lang]
snake_case_ = [self.cur_lang_code_id]
snake_case_ = [self.eos_token_id]
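A standalone sketch of the id-alignment rule implemented in the token-to-id method above (illustrative; the lambdas stand in for the real SentencePiece lookup): fixed fairseq slots win, any other piece id is shifted by the offset, and a zero spm id falls back to the unknown token.

fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
fairseq_offset = 1

def token_to_id(token, spm_piece_to_id):
    if token in fairseq_tokens_to_ids:
        return fairseq_tokens_to_ids[token]
    spm_id = spm_piece_to_id(token)
    return spm_id + fairseq_offset if spm_id else fairseq_tokens_to_ids["<unk>"]

print(token_to_id("<pad>", lambda t: 0))  # 1, from the fixed table
print(token_to_id(",", lambda t: 3))      # 4, spm id 3 shifted by the offset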
| 351 |
'''simple docstring'''
import argparse
from collections import defaultdict
import yaml
PATH_TO_TOC = "docs/source/en/_toctree.yml"
def clean_model_doc_toc( model_doc ):
    '''simple docstring'''
    counts = defaultdict(int )
    for doc in model_doc:
        counts[doc["local"]] += 1
    duplicates = [key for key, value in counts.items() if value > 1]
    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc["title"] for doc in model_doc if doc["local"] == duplicate_key} )
        if len(titles ) > 1:
            raise ValueError(
                f'{duplicate_key} is present several times in the documentation table of content at '
                "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
                "others." )
        # Only add this once
        new_doc.append({"local": duplicate_key, "title": titles[0]} )
    # Add non-duplicate keys
    new_doc.extend([doc for doc in model_doc if counts[doc["local"]] == 1] )
    # Sort
    return sorted(new_doc , key=lambda s : s["title"].lower() )
def check_model_doc( overwrite=False ):
    '''simple docstring'''
    with open(PATH_TO_TOC , encoding="utf-8" ) as f:
        content = yaml.safe_load(f.read() )
    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]
    # Then to the model doc
    model_idx = 0
    while api_doc[model_idx]["title"] != "Models":
        model_idx += 1
    model_doc = api_doc[model_idx]["sections"]
    modalities_docs = [(idx, section) for idx, section in enumerate(model_doc ) if "sections" in section]
    diff = False
    for idx, modality_doc in modalities_docs:
        old_modality_doc = modality_doc["sections"]
        new_modality_doc = clean_model_doc_toc(old_modality_doc )
        if old_modality_doc != new_modality_doc:
            diff = True
            if overwrite:
                modality_doc["sections"] = new_modality_doc
    if diff:
        if overwrite:
            api_doc[model_idx]["sections"] = model_doc
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC , "w" , encoding="utf-8" ) as f:
                f.write(yaml.dump(content , allow_unicode=True ) )
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this." )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()
    check_model_doc(args.fix_and_overwrite)
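A toy check of the dedup helper (illustrative, not part of the script): duplicate `local` entries with one agreed title collapse to a single entry, and the result comes back sorted by title.

toy = [
    {"local": "model_doc/bert", "title": "BERT"},
    {"local": "model_doc/albert", "title": "ALBERT"},
    {"local": "model_doc/bert", "title": "BERT"},
]
print(clean_model_doc_toc(toy))
# [{'local': 'model_doc/albert', 'title': 'ALBERT'}, {'local': 'model_doc/bert', 'title': 'BERT'}]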
| 92 | 0 |
import argparse
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUs (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
_UpperCAmelCase : str = 16
_UpperCAmelCase : str = 32
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase , _UpperCAmelCase = 16 ) -> str:
lowerCamelCase__ : int = AutoTokenizer.from_pretrained('bert-base-cased' )
lowerCamelCase__ : Union[str, Any] = load_dataset('glue' , 'mrpc' )
def tokenize_function(_UpperCAmelCase ):
# max_length=None => use the model max length (it's actually the default)
lowerCamelCase__ : int = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=_UpperCAmelCase , max_length=_UpperCAmelCase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
lowerCamelCase__ : Any = datasets.map(
_UpperCAmelCase , batched=_UpperCAmelCase , remove_columns=['idx', 'sentence1', 'sentence2'] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
lowerCamelCase__ : Dict = tokenized_datasets.rename_column('label' , 'labels' )
def collate_fn(_UpperCAmelCase ):
# On TPU it's best to pad everything to the same length or training will be very slow.
lowerCamelCase__ : Dict = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
lowerCamelCase__ : List[Any] = 16
elif accelerator.mixed_precision != "no":
lowerCamelCase__ : Dict = 8
else:
lowerCamelCase__ : Any = None
return tokenizer.pad(
_UpperCAmelCase , padding='longest' , max_length=_UpperCAmelCase , pad_to_multiple_of=_UpperCAmelCase , return_tensors='pt' , )
# Instantiate dataloaders.
lowerCamelCase__ : List[str] = DataLoader(
tokenized_datasets['train'] , shuffle=_UpperCAmelCase , collate_fn=_UpperCAmelCase , batch_size=_UpperCAmelCase , drop_last=_UpperCAmelCase )
lowerCamelCase__ : str = DataLoader(
tokenized_datasets['validation'] , shuffle=_UpperCAmelCase , collate_fn=_UpperCAmelCase , batch_size=_UpperCAmelCase , drop_last=(accelerator.mixed_precision == 'fp8') , )
return train_dataloader, eval_dataloader
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase , _UpperCAmelCase ) -> Optional[Any]:
# Initialize accelerator
lowerCamelCase__ : Union[str, Any] = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lowerCamelCase__ : int = config['lr']
lowerCamelCase__ : Tuple = int(config['num_epochs'] )
lowerCamelCase__ : Union[str, Any] = int(config['seed'] )
lowerCamelCase__ : List[str] = int(config['batch_size'] )
lowerCamelCase__ : List[str] = evaluate.load('glue' , 'mrpc' )
# If the batch size is too big we use gradient accumulation
lowerCamelCase__ : Dict = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
lowerCamelCase__ : Optional[Any] = batch_size // MAX_GPU_BATCH_SIZE
lowerCamelCase__ : Any = MAX_GPU_BATCH_SIZE
set_seed(_UpperCAmelCase )
lowerCamelCase__ , lowerCamelCase__ : Dict = get_dataloaders(_UpperCAmelCase , _UpperCAmelCase )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
lowerCamelCase__ : Any = AutoModelForSequenceClassification.from_pretrained('bert-base-cased' , return_dict=_UpperCAmelCase )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
lowerCamelCase__ : Dict = model.to(accelerator.device )
# Instantiate optimizer
lowerCamelCase__ : int = AdamW(params=model.parameters() , lr=_UpperCAmelCase )
# Instantiate scheduler
lowerCamelCase__ : Dict = get_linear_schedule_with_warmup(
optimizer=_UpperCAmelCase , num_warmup_steps=100 , num_training_steps=(len(_UpperCAmelCase ) * num_epochs) // gradient_accumulation_steps , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : int = accelerator.prepare(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# Now we train the model
for epoch in range(_UpperCAmelCase ):
model.train()
for step, batch in enumerate(_UpperCAmelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
lowerCamelCase__ : Optional[int] = model(**_UpperCAmelCase )
lowerCamelCase__ : Any = outputs.loss
lowerCamelCase__ : Optional[Any] = loss / gradient_accumulation_steps
accelerator.backward(_UpperCAmelCase )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(_UpperCAmelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
lowerCamelCase__ : List[Any] = model(**_UpperCAmelCase )
lowerCamelCase__ : Any = outputs.logits.argmax(dim=-1 )
lowerCamelCase__ , lowerCamelCase__ : Optional[int] = accelerator.gather_for_metrics((predictions, batch['labels']) )
metric.add_batch(
predictions=_UpperCAmelCase , references=_UpperCAmelCase , )
lowerCamelCase__ : List[Any] = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F"""epoch {epoch}:""" , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE ( ) -> Optional[Any]:
lowerCamelCase__ : str = argparse.ArgumentParser(description='Simple example of training script.' )
parser.add_argument(
'--mixed_precision' , type=_UpperCAmelCase , default=_UpperCAmelCase , choices=['no', 'fp16', 'bf16', 'fp8'] , help='Whether to use mixed precision. Choose'
'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'
'and an Nvidia Ampere GPU.' , )
parser.add_argument('--cpu' , action='store_true' , help='If passed, will train on the CPU.' )
lowerCamelCase__ : Tuple = parser.parse_args()
lowerCamelCase__ : Dict = {'lr': 2e-5, 'num_epochs': 3, 'seed': 42, 'batch_size': 16}
training_function(_UpperCAmelCase , _UpperCAmelCase )
if __name__ == "__main__":
main()
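The batch-splitting rule near the top of the training function keeps the effective batch size constant by trading per-step batch size for gradient-accumulation steps. A standalone sketch of that arithmetic (illustrative, no GPU required):

MAX_GPU_BATCH_SIZE = 16

def split_batch(batch_size):
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE
    return batch_size, gradient_accumulation_steps

print(split_batch(64))  # (16, 4): four accumulated steps of 16 keep the effective batch at 64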
| 50 |
snake_case_ : Dict = {
"Pillow": "Pillow",
"accelerate": "accelerate>=0.11.0",
"compel": "compel==0.1.8",
"black": "black~=23.1",
"datasets": "datasets",
"filelock": "filelock",
"flax": "flax>=0.4.1",
"hf-doc-builder": "hf-doc-builder>=0.3.0",
"huggingface-hub": "huggingface-hub>=0.13.2",
"requests-mock": "requests-mock==1.10.0",
"importlib_metadata": "importlib_metadata",
"invisible-watermark": "invisible-watermark",
"isort": "isort>=5.5.4",
"jax": "jax>=0.2.8,!=0.3.2",
"jaxlib": "jaxlib>=0.1.65",
"Jinja2": "Jinja2",
"k-diffusion": "k-diffusion>=0.0.12",
"torchsde": "torchsde",
"note_seq": "note_seq",
"librosa": "librosa",
"numpy": "numpy",
"omegaconf": "omegaconf",
"parameterized": "parameterized",
"protobuf": "protobuf>=3.20.3,<4",
"pytest": "pytest",
"pytest-timeout": "pytest-timeout",
"pytest-xdist": "pytest-xdist",
"ruff": "ruff>=0.0.241",
"safetensors": "safetensors",
"sentencepiece": "sentencepiece>=0.1.91,!=0.1.92",
"scipy": "scipy",
"onnx": "onnx",
"regex": "regex!=2019.12.17",
"requests": "requests",
"tensorboard": "tensorboard",
"torch": "torch>=1.4",
"torchvision": "torchvision",
"transformers": "transformers>=4.25.1",
"urllib3": "urllib3<=2.0.0",
}
| 51 | 0 |
import cmath
import math
def apparent_power(voltage: float, current: float, voltage_angle: float, current_angle: float ) -> complex:
    """
    Calculate the apparent power in a single-phase AC circuit.

    >>> apparent_power(100, 5, 0, 0)
    (500+0j)
    """
    voltage_angle = math.radians(voltage_angle )
    current_angle = math.radians(current_angle )
    # Convert voltage and current to rectangular form
    voltage_rect = cmath.rect(voltage, voltage_angle )
    current_rect = cmath.rect(current, current_angle )
    # Calculate apparent power
    return voltage_rect * current_rect
if __name__ == "__main__":
import doctest
doctest.testmod()
| 356 |
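# Usage sketch for `apparent_power` above: magnitudes multiply and phase angles add,
# so the apparent-power magnitude is |V|*|I| regardless of the angles. Values are
# illustrative only.
# apparent_power(100, 5, 0, 0)          -> (500+0j)
# abs(apparent_power(100, 5, 30, -10))  -> ~500.0 VA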
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class DDPMSchedulerState:
    common: CommonSchedulerState
    # setable values
    init_noise_sigma: jnp.ndarray
    timesteps: jnp.ndarray
    num_inference_steps: Optional[int] = None
    @classmethod
    def create(cls, common: CommonSchedulerState, init_noise_sigma: jnp.ndarray, timesteps: jnp.ndarray ) -> "DDPMSchedulerState":
        return cls(common=common, init_noise_sigma=init_noise_sigma, timesteps=timesteps )
@dataclass
class FlaxDDPMSchedulerOutput(FlaxSchedulerOutput ):
    state: DDPMSchedulerState
class FlaxDDPMScheduler(FlaxSchedulerMixin, ConfigMixin ):
    _compatibles = [e.name for e in FlaxKarrasDiffusionSchedulers]
    dtype: jnp.dtype
@property
    def has_state(self ) -> bool:
        return True
@register_to_config
    def __init__(self, num_train_timesteps: int = 1000, beta_start: float = 0.0001, beta_end: float = 0.02, beta_schedule: str = "linear", trained_betas: Optional[jnp.ndarray] = None, variance_type: str = "fixed_small", clip_sample: bool = True, prediction_type: str = "epsilon", dtype: jnp.dtype = jnp.float32, ) -> None:
        self.dtype = dtype
    def create_state(self, common: Optional[CommonSchedulerState] = None ) -> DDPMSchedulerState:
        if common is None:
            common = CommonSchedulerState.create(self )
        # standard deviation of the initial noise distribution
        init_noise_sigma = jnp.array(1.0, dtype=self.dtype )
        timesteps = jnp.arange(0, self.config.num_train_timesteps ).round()[::-1]
        return DDPMSchedulerState.create(
            common=common, init_noise_sigma=init_noise_sigma, timesteps=timesteps, )
    def scale_model_input(self, state: DDPMSchedulerState, sample: jnp.ndarray, timestep: Optional[int] = None ) -> jnp.ndarray:
        return sample
    def set_timesteps(self, state: DDPMSchedulerState, num_inference_steps: int, shape: Tuple = () ) -> DDPMSchedulerState:
        step_ratio = self.config.num_train_timesteps // num_inference_steps
        # creates integer timesteps by multiplying by ratio
        # rounding to avoid issues when num_inference_step is power of 3
        timesteps = (jnp.arange(0, num_inference_steps ) * step_ratio).round()[::-1]
        return state.replace(
            num_inference_steps=num_inference_steps, timesteps=timesteps, )
    def _get_variance(self, state: DDPMSchedulerState, t, predicted_variance=None, variance_type=None ) -> jnp.ndarray:
        alpha_prod_t = state.common.alphas_cumprod[t]
        alpha_prod_t_prev = jnp.where(t > 0, state.common.alphas_cumprod[t - 1], jnp.array(1.0, dtype=self.dtype ) )
        # For t > 0, compute predicted variance (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
        # and sample from it to get previous sample
        # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
        variance = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]
        if variance_type is None:
            variance_type = self.config.variance_type
        # hacks - were probably added for training stability
        if variance_type == "fixed_small":
            variance = jnp.clip(variance, a_min=1e-20 )
        # for rl-diffuser https://arxiv.org/abs/2205.09991
        elif variance_type == "fixed_small_log":
            variance = jnp.log(jnp.clip(variance, a_min=1e-20 ) )
        elif variance_type == "fixed_large":
            variance = state.common.betas[t]
        elif variance_type == "fixed_large_log":
            # Glide max_log
            variance = jnp.log(state.common.betas[t] )
        elif variance_type == "learned":
            return predicted_variance
        elif variance_type == "learned_range":
            min_log = variance
            max_log = state.common.betas[t]
            frac = (predicted_variance + 1) / 2
            variance = frac * max_log + (1 - frac) * min_log
        return variance
    def step(self, state: DDPMSchedulerState, model_output: jnp.ndarray, timestep: int, sample: jnp.ndarray, key: Optional[jnp.ndarray] = None, return_dict: bool = True, ) -> Union[FlaxDDPMSchedulerOutput, Tuple]:
        t = timestep
        if key is None:
            key = jax.random.PRNGKey(0 )
        if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
            model_output, predicted_variance = jnp.split(model_output, sample.shape[1], axis=1 )
        else:
            predicted_variance = None
        # 1. compute alphas, betas
        alpha_prod_t = state.common.alphas_cumprod[t]
        alpha_prod_t_prev = jnp.where(t > 0, state.common.alphas_cumprod[t - 1], jnp.array(1.0, dtype=self.dtype ) )
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev
        # 2. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        elif self.config.prediction_type == "v_prediction":
            pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` "
                "or `v_prediction` for the FlaxDDPMScheduler." )
        # 3. Clip "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = jnp.clip(pred_original_sample, -1, 1 )
        # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
        current_sample_coeff = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
        # 5. Compute predicted previous sample mu_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
        # 6. Add noise
        def random_variance():
            split_key = jax.random.split(key, num=1 )
            noise = jax.random.normal(split_key, shape=model_output.shape, dtype=self.dtype )
            return (self._get_variance(state, t, predicted_variance=predicted_variance ) ** 0.5) * noise
        variance = jnp.where(t > 0, random_variance(), jnp.zeros(model_output.shape, dtype=self.dtype ) )
        pred_prev_sample = pred_prev_sample + variance
        if not return_dict:
            return (pred_prev_sample, state)
        return FlaxDDPMSchedulerOutput(prev_sample=pred_prev_sample, state=state )
    def add_noise(self, state: DDPMSchedulerState, original_samples: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray, ) -> jnp.ndarray:
        return add_noise_common(state.common, original_samples, noise, timesteps )
    def get_velocity(self, state: DDPMSchedulerState, sample: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray, ) -> jnp.ndarray:
        return get_velocity_common(state.common, sample, noise, timesteps )
    def __len__(self ) -> int:
        return self.config.num_train_timesteps
| 279 | 0 |
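# NumPy sketch of the posterior-mean computation in `step` above (formula (7) of
# https://arxiv.org/pdf/2006.11239.pdf); the schedule and tensors below are
# illustrative stand-ins, not values taken from a real scheduler state.
import numpy as np

betas = np.linspace(1e-4, 0.02, 1000, dtype=np.float32)
alphas = 1.0 - betas
alphas_cumprod = np.cumprod(alphas)
t = 500
alpha_prod_t, alpha_prod_t_prev = alphas_cumprod[t], alphas_cumprod[t - 1]
beta_prod_t, beta_prod_t_prev = 1 - alpha_prod_t, 1 - alpha_prod_t_prev
sample = np.random.randn(4).astype(np.float32)                 # x_t
pred_original_sample = np.random.randn(4).astype(np.float32)   # predicted x_0
pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * betas[t]) / beta_prod_t
current_sample_coeff = alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample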
"""simple docstring"""
import argparse
from collections import defaultdict
import yaml
__snake_case = """docs/source/en/_toctree.yml"""
def clean_model_doc_toc(model_doc ):
    """
    Cleans the table of content of the model documentation by removing duplicate entries and sorting models
    alphabetically.
    """
    counts = defaultdict(int )
    for doc in model_doc:
        counts[doc["local"]] += 1
    duplicates = [key for key, value in counts.items() if value > 1]
    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc["title"] for doc in model_doc if doc["local"] == duplicate_key} )
        if len(titles ) > 1:
            raise ValueError(
                f'{duplicate_key} is present several times in the documentation table of content at '
                "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
                "others." )
        # Only add this once
        new_doc.append({"local": duplicate_key, "title": titles[0]} )
    # Add non-duplicate keys
    new_doc.extend([doc for doc in model_doc if counts[doc["local"]] == 1] )
    # Sort
    return sorted(new_doc, key=lambda s: s["title"].lower() )
def check_model_doc(overwrite=False ):
    with open(PATH_TO_TOC, encoding="utf-8" ) as f:
        content = yaml.safe_load(f.read() )
    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]
    # Then to the model doc
    model_idx = 0
    while api_doc[model_idx]["title"] != "Models":
        model_idx += 1
    model_doc = api_doc[model_idx]["sections"]
    modalities_docs = [(idx, section) for idx, section in enumerate(model_doc ) if "sections" in section]
    diff = False
    for idx, modality_doc in modalities_docs:
        old_modality_doc = modality_doc["sections"]
        new_modality_doc = clean_model_doc_toc(old_modality_doc )
        if old_modality_doc != new_modality_doc:
            diff = True
            if overwrite:
                model_doc[idx]["sections"] = new_modality_doc
    if diff:
        if overwrite:
            api_doc[model_idx]["sections"] = model_doc
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8" ) as f:
                f.write(yaml.dump(content, allow_unicode=True ) )
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this." )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()
    check_model_doc(args.fix_and_overwrite)
| 203 |
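# Illustration of `clean_model_doc_toc` above on a toy input: duplicate `local`
# entries that share a single title collapse to one entry, and the result is
# sorted by lower-cased title.
toy_doc = [
    {"local": "model_doc/bert", "title": "BERT"},
    {"local": "model_doc/albert", "title": "ALBERT"},
    {"local": "model_doc/bert", "title": "BERT"},
]
# clean_model_doc_toc(toy_doc)
# -> [{"local": "model_doc/albert", "title": "ALBERT"}, {"local": "model_doc/bert", "title": "BERT"}]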
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_rembert import RemBertTokenizer
else:
__snake_case = None
__snake_case = logging.get_logger(__name__)
__snake_case = {"""vocab_file""": """sentencepiece.model""", """tokenizer_file""": """tokenizer.json"""}
__snake_case = {
"""vocab_file""": {
"""google/rembert""": """https://huggingface.co/google/rembert/resolve/main/sentencepiece.model""",
},
"""tokenizer_file""": {
"""google/rembert""": """https://huggingface.co/google/rembert/resolve/main/tokenizer.json""",
},
}
__snake_case = {
"""google/rembert""": 256,
}
__snake_case = """▁"""
class _lowerCAmelCase ( snake_case_ ):
__UpperCAmelCase : List[str] = VOCAB_FILES_NAMES
__UpperCAmelCase : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
__UpperCAmelCase : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCAmelCase : str = RemBertTokenizer
def __init__( self , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=True , UpperCamelCase__=True , UpperCamelCase__=False , UpperCamelCase__="[CLS]" , UpperCamelCase__="[SEP]" , UpperCamelCase__="<unk>" , UpperCamelCase__="[SEP]" , UpperCamelCase__="<pad>" , UpperCamelCase__="[CLS]" , UpperCamelCase__="[MASK]" , **UpperCamelCase__ , ) -> Tuple:
'''simple docstring'''
snake_case : Dict = AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else mask_token
super().__init__(
UpperCamelCase__ , tokenizer_file=UpperCamelCase__ , do_lower_case=UpperCamelCase__ , remove_space=UpperCamelCase__ , keep_accents=UpperCamelCase__ , bos_token=UpperCamelCase__ , eos_token=UpperCamelCase__ , unk_token=UpperCamelCase__ , sep_token=UpperCamelCase__ , pad_token=UpperCamelCase__ , cls_token=UpperCamelCase__ , mask_token=UpperCamelCase__ , **UpperCamelCase__ , )
snake_case : int = do_lower_case
snake_case : Union[str, Any] = remove_space
snake_case : Optional[int] = keep_accents
snake_case : Any = vocab_file
snake_case : Any = False if not self.vocab_file else True
def lowerCamelCase ( self , UpperCamelCase__ , UpperCamelCase__ = None ) -> List[int]:
'''simple docstring'''
snake_case : Optional[Any] = [self.sep_token_id]
snake_case : Optional[int] = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def lowerCamelCase ( self , UpperCamelCase__ , UpperCamelCase__ = None , UpperCamelCase__ = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
"You should not supply a second sequence if the provided sequence of "
"ids is already formatted with special tokens for the model." )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(UpperCamelCase__ )) + [1] + ([0] * len(UpperCamelCase__ )) + [1]
return [1] + ([0] * len(UpperCamelCase__ )) + [1]
def lowerCamelCase ( self , UpperCamelCase__ , UpperCamelCase__ = None ) -> List[int]:
'''simple docstring'''
snake_case : Union[str, Any] = [self.sep_token_id]
snake_case : Optional[int] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def lowerCamelCase ( self , UpperCamelCase__ , UpperCamelCase__ = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(UpperCamelCase__ ):
logger.error("Vocabulary path ({}) should be a directory".format(UpperCamelCase__ ) )
return
snake_case : str = os.path.join(
UpperCamelCase__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase__ ):
copyfile(self.vocab_file , UpperCamelCase__ )
return (out_vocab_file,)
| 203 | 1 |
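# The special-token layout built by the tokenizer methods above, shown on toy token
# ids (101/102 are hypothetical stand-ins for the real [CLS]/[SEP] vocabulary ids).
cls, sep = [101], [102]
token_ids_a, token_ids_b = [7, 8, 9], [4, 5]
single = cls + token_ids_a + sep                    # [101, 7, 8, 9, 102]
pair = cls + token_ids_a + sep + token_ids_b + sep  # [101, 7, 8, 9, 102, 4, 5, 102]
token_type_ids = len(cls + token_ids_a + sep) * [0] + len(token_ids_b + sep) * [1]
# -> [0, 0, 0, 0, 0, 1, 1, 1]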
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
lowerCamelCase_ = logging.get_logger(__name__)
lowerCamelCase_ = {
'microsoft/deberta-v2-xlarge': 'https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json',
'microsoft/deberta-v2-xxlarge': 'https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json',
'microsoft/deberta-v2-xlarge-mnli': (
'https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json'
),
'microsoft/deberta-v2-xxlarge-mnli': (
'https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json'
),
}
class lowercase_ ( A ):
"""simple docstring"""
lowerCamelCase_ = '''deberta-v2'''
def __init__( self : str , __lowerCamelCase : Union[str, Any]=1_2_8_1_0_0 , __lowerCamelCase : Optional[int]=1_5_3_6 , __lowerCamelCase : Optional[int]=2_4 , __lowerCamelCase : Optional[int]=2_4 , __lowerCamelCase : Tuple=6_1_4_4 , __lowerCamelCase : List[str]="gelu" , __lowerCamelCase : int=0.1 , __lowerCamelCase : Optional[Any]=0.1 , __lowerCamelCase : Union[str, Any]=5_1_2 , __lowerCamelCase : Optional[Any]=0 , __lowerCamelCase : str=0.0_2 , __lowerCamelCase : int=1e-7 , __lowerCamelCase : Any=False , __lowerCamelCase : Any=-1 , __lowerCamelCase : Tuple=0 , __lowerCamelCase : str=True , __lowerCamelCase : List[Any]=None , __lowerCamelCase : Optional[int]=0 , __lowerCamelCase : Any="gelu" , **__lowerCamelCase : Union[str, Any] , ):
"""simple docstring"""
super().__init__(**__lowerCamelCase )
_SCREAMING_SNAKE_CASE = hidden_size
_SCREAMING_SNAKE_CASE = num_hidden_layers
_SCREAMING_SNAKE_CASE = num_attention_heads
_SCREAMING_SNAKE_CASE = intermediate_size
_SCREAMING_SNAKE_CASE = hidden_act
_SCREAMING_SNAKE_CASE = hidden_dropout_prob
_SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
_SCREAMING_SNAKE_CASE = max_position_embeddings
_SCREAMING_SNAKE_CASE = type_vocab_size
_SCREAMING_SNAKE_CASE = initializer_range
_SCREAMING_SNAKE_CASE = relative_attention
_SCREAMING_SNAKE_CASE = max_relative_positions
_SCREAMING_SNAKE_CASE = pad_token_id
_SCREAMING_SNAKE_CASE = position_biased_input
# Backwards compatibility
if type(__lowerCamelCase ) == str:
_SCREAMING_SNAKE_CASE = [x.strip() for x in pos_att_type.lower().split("|" )]
_SCREAMING_SNAKE_CASE = pos_att_type
_SCREAMING_SNAKE_CASE = vocab_size
_SCREAMING_SNAKE_CASE = layer_norm_eps
_SCREAMING_SNAKE_CASE = kwargs.get("pooler_hidden_size" , __lowerCamelCase )
_SCREAMING_SNAKE_CASE = pooler_dropout
_SCREAMING_SNAKE_CASE = pooler_hidden_act
class lowercase_ ( A ):
"""simple docstring"""
@property
def lowerCAmelCase_ ( self : List[Any] ):
"""simple docstring"""
if self.task == "multiple-choice":
_SCREAMING_SNAKE_CASE = {0: "batch", 1: "choice", 2: "sequence"}
else:
_SCREAMING_SNAKE_CASE = {0: "batch", 1: "sequence"}
if self._config.type_vocab_size > 0:
return OrderedDict(
[("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ("token_type_ids", dynamic_axis)] )
else:
return OrderedDict([("input_ids", dynamic_axis), ("attention_mask", dynamic_axis)] )
@property
def lowerCAmelCase_ ( self : List[str] ):
"""simple docstring"""
return 1_2
def lowerCAmelCase_ ( self : List[str] , __lowerCamelCase : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , __lowerCamelCase : int = -1 , __lowerCamelCase : int = -1 , __lowerCamelCase : int = -1 , __lowerCamelCase : bool = False , __lowerCamelCase : Optional["TensorType"] = None , __lowerCamelCase : int = 3 , __lowerCamelCase : int = 4_0 , __lowerCamelCase : int = 4_0 , __lowerCamelCase : "PreTrainedTokenizerBase" = None , ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = super().generate_dummy_inputs(preprocessor=__lowerCamelCase , framework=__lowerCamelCase )
if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
del dummy_inputs["token_type_ids"]
return dummy_inputs
| 111 |
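# Usage sketch for the configuration class above (it corresponds to `DebertaV2Config`
# in transformers; the field values below are illustrative, not asserted defaults):
# config = DebertaV2Config(hidden_size=1536, num_hidden_layers=24)
# config.to_dict()["hidden_size"]  -> 1536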
'''simple docstring'''
from collections.abc import Sequence
def max_subsequence_sum(nums: Sequence[int] | None = None ) -> int:
    """
    >>> max_subsequence_sum([1, 2, 3, -2, 5])
    11
    >>> max_subsequence_sum([-2, -3, -1, -4, -6])
    -1
    """
    if nums is None or not nums:
        raise ValueError("Input sequence should not be empty" )
    ans = nums[0]
    for i in range(1, len(nums ) ):
        num = nums[i]
        ans = max(ans, ans + num, num )
    return ans
if __name__ == "__main__":
import doctest
doctest.testmod()
# Try on a sample input from the user
    n = int(input("Enter number of elements : ").strip())
    array = list(map(int, input("\nEnter the numbers : ").strip().split()))[:n]
print(max_subsequence_sum(array))
| 111 | 1 |
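# Sanity check for `max_subsequence_sum` above: since elements need not be contiguous,
# the optimum is the sum of the positive elements (or the single largest element when
# none are positive).
nums = [1, 2, 3, -2, 5]
closed_form = sum(x for x in nums if x > 0) if max(nums) > 0 else max(nums)
print(closed_form)  # 11, matching max_subsequence_sum(nums)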
import os
# Precomputes a list of the 100 first triangular numbers
__a = [int(0.5 * n * (n + 1)) for n in range(1, 1_0_1)]
def solution():
    """Count how many words in words.txt have a triangular-number word value."""
    script_dir = os.path.dirname(os.path.realpath(__file__))
    words_file_path = os.path.join(script_dir, "words.txt")
    words = ""
    with open(words_file_path) as f:
        words = f.readline()
    words = [word.strip('"') for word in words.strip("\r\n").split(",")]
    words = [
        word
        for word in [sum(ord(x) - 64 for x in word) for word in words]
        if word in TRIANGULAR_NUMBERS
    ]
    return len(words)
if __name__ == "__main__":
print(solution())
| 30 |
from __future__ import annotations
def bin_search(v, l, r, key):  # noqa: E741
    while r - l > 1:
        m = (l + r) // 2
        if v[m] >= key:
            r = m
        else:
            l = m  # noqa: E741
    return r
def longest_increasing_subsequence_length(v) -> int:
    """
    >>> longest_increasing_subsequence_length([2, 5, 3, 7, 11, 8, 10, 13, 6])
    6
    >>> longest_increasing_subsequence_length([])
    0
    """
    if len(v) == 0:
        return 0
    tail = [0] * len(v)
    length = 1
    tail[0] = v[0]
    for i in range(1, len(v)):
        if v[i] < tail[0]:
            tail[0] = v[i]
        elif v[i] > tail[length - 1]:
            tail[length] = v[i]
            length += 1
        else:
            # replace the first element >= v[i] (patience-sorting style update)
            tail[bin_search(tail, -1, length - 1, v[i])] = v[i]
    return length
if __name__ == "__main__":
import doctest
doctest.testmod()
| 178 | 0 |
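# Cross-check for the O(n log n) LIS length above using the standard `bisect` module:
# the longest strictly increasing subsequence of [2, 5, 3, 7, 11, 8, 10, 13, 6] has
# length 6 (for example [2, 3, 7, 8, 10, 13]).
import bisect

v = [2, 5, 3, 7, 11, 8, 10, 13, 6]
tails = []
for x in v:
    i = bisect.bisect_left(tails, x)
    if i == len(tails):
        tails.append(x)
    else:
        tails[i] = x
print(len(tails))  # 6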
'''simple docstring'''
import numpy as np
from cv2 import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filter2D, imread, imshow, waitKey
def gabor_filter_kernel(ksize, sigma, theta, lambd, psi, gamma):
    # prepare kernel
    # the kernel size has to be odd
    if (ksize % 2) == 0:
        ksize = ksize + 1
    gabor = np.zeros((ksize, ksize), dtype=np.float32)
    # each value
    for y in range(ksize):
        for x in range(ksize):
            # distance from center
            px = x - ksize // 2
            py = y - ksize // 2
            # degree to radiant
            _theta = theta / 180 * np.pi
            cos_theta = np.cos(_theta)
            sin_theta = np.sin(_theta)
            # get kernel x
            _x = cos_theta * px + sin_theta * py
            # get kernel y
            _y = -sin_theta * px + cos_theta * py
            # fill kernel
            gabor[y, x] = np.exp(
                -(_x**2 + gamma**2 * _y**2) / (2 * sigma**2)) * np.cos(2 * np.pi * _x / lambd + psi)
    return gabor
if __name__ == "__main__":
import doctest
doctest.testmod()
    # read original image
    img = imread("../image_data/lena.jpg")
    # turn image into gray scale values
    gray = cvtColor(img, COLOR_BGR2GRAY)
    # Apply multiple kernels to detect edges
    out = np.zeros(gray.shape[:2])
    for theta in [0, 30, 60, 90, 120, 150]:
        kernel_10 = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
        out += filter2D(gray, CV_8UC3, kernel_10)
    out = out / out.max() * 255
    out = out.astype(np.uint8)
    imshow("Original", gray)
    imshow("Gabor filter with 20x20 mask and 6 directions", out)
    waitKey(0)
| 370 |
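# Quick shape check for the kernel builder above: even sizes are bumped to the next
# odd size, so requesting 10 yields an 11x11 kernel.
# gabor_filter_kernel(10, 8, 0, 10, 0, 0).shape  -> (11, 11)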
'''simple docstring'''
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
_UpperCAmelCase : Dict = """true"""
def __magic_name__( lowerCamelCase, lowerCamelCase=8_2, lowerCamelCase=1_6):
set_seed(4_2)
__lowerCAmelCase = RegressionModel()
__lowerCAmelCase = deepcopy(lowerCamelCase)
__lowerCAmelCase = RegressionDataset(length=lowerCamelCase)
__lowerCAmelCase = DataLoader(lowerCamelCase, batch_size=lowerCamelCase)
model.to(accelerator.device)
__lowerCAmelCase , __lowerCAmelCase = accelerator.prepare(lowerCamelCase, lowerCamelCase)
return model, ddp_model, dataloader
def __magic_name__( lowerCamelCase, lowerCamelCase=False):
__lowerCAmelCase = AutoTokenizer.from_pretrained('''hf-internal-testing/mrpc-bert-base-cased''')
__lowerCAmelCase = load_dataset('''glue''', '''mrpc''', split='''validation''')
def tokenize_function(lowerCamelCase):
__lowerCAmelCase = tokenizer(examples['''sentence1'''], examples['''sentence2'''], truncation=lowerCamelCase, max_length=lowerCamelCase)
return outputs
with accelerator.main_process_first():
__lowerCAmelCase = dataset.map(
lowerCamelCase, batched=lowerCamelCase, remove_columns=['''idx''', '''sentence1''', '''sentence2'''], )
__lowerCAmelCase = tokenized_datasets.rename_column('''label''', '''labels''')
def collate_fn(lowerCamelCase):
if use_longest:
return tokenizer.pad(lowerCamelCase, padding='''longest''', return_tensors='''pt''')
return tokenizer.pad(lowerCamelCase, padding='''max_length''', max_length=1_2_8, return_tensors='''pt''')
return DataLoader(lowerCamelCase, shuffle=lowerCamelCase, collate_fn=lowerCamelCase, batch_size=1_6)
def __magic_name__( lowerCamelCase, lowerCamelCase):
__lowerCAmelCase = Accelerator(dispatch_batches=lowerCamelCase, split_batches=lowerCamelCase)
__lowerCAmelCase = get_dataloader(lowerCamelCase, not dispatch_batches)
__lowerCAmelCase = AutoModelForSequenceClassification.from_pretrained(
'''hf-internal-testing/mrpc-bert-base-cased''', return_dict=lowerCamelCase)
__lowerCAmelCase , __lowerCAmelCase = accelerator.prepare(lowerCamelCase, lowerCamelCase)
return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def __magic_name__( lowerCamelCase, lowerCamelCase, lowerCamelCase):
__lowerCAmelCase = []
for batch in dataloader:
__lowerCAmelCase , __lowerCAmelCase = batch.values()
with torch.no_grad():
__lowerCAmelCase = model(lowerCamelCase)
__lowerCAmelCase , __lowerCAmelCase = accelerator.gather_for_metrics((logit, target))
logits_and_targets.append((logit, target))
__lowerCAmelCase , __lowerCAmelCase = [], []
for logit, targ in logits_and_targets:
logits.append(lowerCamelCase)
targs.append(lowerCamelCase)
__lowerCAmelCase , __lowerCAmelCase = torch.cat(lowerCamelCase), torch.cat(lowerCamelCase)
return logits, targs
def __magic_name__( lowerCamelCase, lowerCamelCase=8_2, lowerCamelCase=False, lowerCamelCase=False, lowerCamelCase=1_6):
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = get_basic_setup(lowerCamelCase, lowerCamelCase, lowerCamelCase)
__lowerCAmelCase , __lowerCAmelCase = generate_predictions(lowerCamelCase, lowerCamelCase, lowerCamelCase)
assert (
len(lowerCamelCase) == num_samples
), F"""Unexpected number of inputs:\n Expected: {num_samples}\n Actual: {len(lowerCamelCase)}"""
def __magic_name__( lowerCamelCase = False, lowerCamelCase = False):
__lowerCAmelCase = evaluate.load('''glue''', '''mrpc''')
__lowerCAmelCase , __lowerCAmelCase = get_mrpc_setup(lowerCamelCase, lowerCamelCase)
# First do baseline
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = setup['''no''']
model.to(lowerCamelCase)
model.eval()
for batch in dataloader:
batch.to(lowerCamelCase)
with torch.inference_mode():
__lowerCAmelCase = model(**lowerCamelCase)
__lowerCAmelCase = outputs.logits.argmax(dim=-1)
metric.add_batch(predictions=lowerCamelCase, references=batch['''labels'''])
__lowerCAmelCase = metric.compute()
# Then do distributed
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = setup['''ddp''']
model.eval()
for batch in dataloader:
with torch.inference_mode():
__lowerCAmelCase = model(**lowerCamelCase)
__lowerCAmelCase = outputs.logits.argmax(dim=-1)
__lowerCAmelCase = batch['''labels''']
__lowerCAmelCase , __lowerCAmelCase = accelerator.gather_for_metrics((preds, references))
metric.add_batch(predictions=lowerCamelCase, references=lowerCamelCase)
__lowerCAmelCase = metric.compute()
for key in "accuracy f1".split():
assert math.isclose(
baseline[key], distributed[key]), F"""Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n"""
def __magic_name__( ):
__lowerCAmelCase = Accelerator(split_batches=lowerCamelCase, dispatch_batches=lowerCamelCase)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_warning()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# These are a bit slower so they should only be ran on the GPU or TPU
if torch.cuda.is_available() or is_tpu_available():
if accelerator.is_local_main_process:
print('''**Testing gather_for_metrics**''')
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
if accelerator.is_local_main_process:
print(F"""With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`""")
test_mrpc(lowerCamelCase, lowerCamelCase)
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print('''**Test torch metrics**''')
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
__lowerCAmelCase = Accelerator(split_batches=lowerCamelCase, dispatch_batches=lowerCamelCase)
if accelerator.is_local_main_process:
print(F"""With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99""")
test_torch_metrics(lowerCamelCase, 9_9)
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print('''**Test last batch is not dropped when perfectly divisible**''')
__lowerCAmelCase = Accelerator()
test_torch_metrics(lowerCamelCase, 5_1_2)
accelerator.state._reset_state()
def __magic_name__( lowerCamelCase):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 9 | 0 |
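# Minimal sketch of the `gather_for_metrics` pattern exercised by the tests above:
# per-process predictions and references are gathered (with duplicate samples from
# the padded last batch dropped) before the metric sees them. `preds`, `refs`, and
# `metric` below are placeholders, not objects from the original script.
# preds, refs = accelerator.gather_for_metrics((preds, refs))
# metric.add_batch(predictions=preds, references=refs)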
'''simple docstring'''
import argparse
import json
import os
from tensorflow.core.protobuf.saved_model_pb2 import SavedModel
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
lowercase : Dict = "."
# Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model)
lowercase : Optional[Any] = [
"Assert",
"AssignVariableOp",
"EmptyTensorList",
"MergeV2Checkpoints",
"ReadVariableOp",
"ResourceGather",
"RestoreV2",
"SaveV2",
"ShardedFilename",
"StatefulPartitionedCall",
"StaticRegexFullMatch",
"VarHandleOp",
]
def SCREAMING_SNAKE_CASE__ ( __A , __A , __A ) -> List[Any]:
_snake_case = SavedModel()
_snake_case = []
with open(os.path.join(__A , 'utils' , 'tf_ops' , 'onnx.json' ) ) as f:
_snake_case = json.load(__A )['opsets']
for i in range(1 , opset + 1 ):
onnx_ops.extend(onnx_opsets[str(__A )] )
with open(__A , 'rb' ) as f:
saved_model.ParseFromString(f.read() )
_snake_case = set()
# Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs)
for meta_graph in saved_model.meta_graphs:
# Add operations in the graph definition
model_op_names.update(node.op for node in meta_graph.graph_def.node )
# Go through the functions in the graph definition
for func in meta_graph.graph_def.library.function:
# Add operations in each function
model_op_names.update(node.op for node in func.node_def )
# Convert to list, sorted if you want
_snake_case = sorted(__A )
_snake_case = []
for op in model_op_names:
if op not in onnx_ops and op not in INTERNAL_OPS:
incompatible_ops.append(__A )
if strict and len(__A ) > 0:
raise Exception(F'Found the following incompatible ops for the opset {opset}:\n' + incompatible_ops )
elif len(__A ) > 0:
print(F'Found the following incompatible ops for the opset {opset}:' )
print(*__A , sep='\n' )
else:
print(F'The saved model {saved_model_path} can properly be converted with ONNX.' )
if __name__ == "__main__":
lowercase : str = argparse.ArgumentParser()
parser.add_argument("--saved_model_path", help="Path of the saved model to check (the .pb file).")
parser.add_argument(
"--opset", default=12, type=int, help="The ONNX opset against which the model has to be tested."
)
parser.add_argument(
"--framework", choices=["onnx"], default="onnx", help="Frameworks against which to test the saved model."
)
parser.add_argument(
"--strict", action="store_true", help="Whether make the checking strict (raise errors) or not (raise warnings)"
)
lowercase : List[Any] = parser.parse_args()
if args.framework == "onnx":
onnx_compliancy(args.saved_model_path, args.strict, args.opset)
| 42 |
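# Hypothetical programmatic invocation of the checker above, equivalent to the CLI
# call at the bottom of the script; the saved-model path is a placeholder.
# onnx_compliancy("path/to/saved_model.pb", False, 12)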
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import is_speech_available, is_vision_available
from transformers.testing_utils import require_torch
if is_vision_available():
from transformers import TvltImageProcessor
if is_speech_available():
from transformers import TvltFeatureExtractor
from transformers import TvltProcessor
@require_torch
class __magic_name__ ( unittest.TestCase):
def UpperCAmelCase__ ( self : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
UpperCamelCase__ : Optional[int] = '''ZinengTang/tvlt-base'''
UpperCamelCase__ : int = tempfile.mkdtemp()
def UpperCAmelCase__ ( self : int , **lowerCamelCase__ : List[str] ) -> List[Any]:
'''simple docstring'''
return TvltImageProcessor.from_pretrained(self.checkpoint , **lowerCamelCase__ )
def UpperCAmelCase__ ( self : Optional[Any] , **lowerCamelCase__ : Tuple ) -> List[Any]:
'''simple docstring'''
return TvltFeatureExtractor.from_pretrained(self.checkpoint , **lowerCamelCase__ )
def UpperCAmelCase__ ( self : str ) -> Tuple:
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def UpperCAmelCase__ ( self : Any ) -> int:
'''simple docstring'''
UpperCamelCase__ : int = self.get_image_processor()
UpperCamelCase__ : Union[str, Any] = self.get_feature_extractor()
UpperCamelCase__ : List[str] = TvltProcessor(image_processor=lowerCamelCase__ , feature_extractor=lowerCamelCase__ )
processor.save_pretrained(self.tmpdirname )
UpperCamelCase__ : Optional[int] = TvltProcessor.from_pretrained(self.tmpdirname )
self.assertIsInstance(processor.feature_extractor , lowerCamelCase__ )
self.assertIsInstance(processor.image_processor , lowerCamelCase__ )
def UpperCAmelCase__ ( self : List[Any] ) -> Tuple:
'''simple docstring'''
UpperCamelCase__ : str = self.get_image_processor()
UpperCamelCase__ : List[Any] = self.get_feature_extractor()
UpperCamelCase__ : Dict = TvltProcessor(image_processor=lowerCamelCase__ , feature_extractor=lowerCamelCase__ )
UpperCamelCase__ : Any = np.ones([12000] )
UpperCamelCase__ : Union[str, Any] = feature_extractor(lowerCamelCase__ , return_tensors='''np''' )
UpperCamelCase__ : Any = processor(audio=lowerCamelCase__ , return_tensors='''np''' )
for key in audio_dict.keys():
self.assertAlmostEqual(audio_dict[key].sum() , input_processor[key].sum() , delta=1E-2 )
def UpperCAmelCase__ ( self : List[Any] ) -> List[Any]:
'''simple docstring'''
UpperCamelCase__ : List[Any] = self.get_image_processor()
UpperCamelCase__ : Any = self.get_feature_extractor()
UpperCamelCase__ : int = TvltProcessor(image_processor=lowerCamelCase__ , feature_extractor=lowerCamelCase__ )
UpperCamelCase__ : int = np.ones([3, 224, 224] )
UpperCamelCase__ : List[str] = image_processor(lowerCamelCase__ , return_tensors='''np''' )
UpperCamelCase__ : str = processor(images=lowerCamelCase__ , return_tensors='''np''' )
for key in image_dict.keys():
self.assertAlmostEqual(image_dict[key].sum() , input_processor[key].sum() , delta=1E-2 )
def UpperCAmelCase__ ( self : Tuple ) -> Optional[Any]:
'''simple docstring'''
UpperCamelCase__ : Any = self.get_image_processor()
UpperCamelCase__ : Dict = self.get_feature_extractor()
UpperCamelCase__ : Union[str, Any] = TvltProcessor(image_processor=lowerCamelCase__ , feature_extractor=lowerCamelCase__ )
UpperCamelCase__ : List[str] = np.ones([12000] )
UpperCamelCase__ : Tuple = np.ones([3, 224, 224] )
UpperCamelCase__ : Optional[Any] = processor(audio=lowerCamelCase__ , images=lowerCamelCase__ )
self.assertListEqual(list(inputs.keys() ) , ['''audio_values''', '''audio_mask''', '''pixel_values''', '''pixel_mask'''] )
# test if it raises when no input is passed
with pytest.raises(lowerCamelCase__ ):
processor()
def UpperCAmelCase__ ( self : Dict ) -> int:
'''simple docstring'''
UpperCamelCase__ : List[str] = self.get_image_processor()
UpperCamelCase__ : str = self.get_feature_extractor()
UpperCamelCase__ : Tuple = TvltProcessor(image_processor=lowerCamelCase__ , feature_extractor=lowerCamelCase__ )
self.assertListEqual(
processor.model_input_names , image_processor.model_input_names + feature_extractor.model_input_names , msg='''`processor` and `image_processor`+`feature_extractor` model input names do not match''' , )
| 146 | 0 |
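# The dispatch pattern exercised by the tests above, in miniature: the processor
# routes `images` to the image processor and `audio` to the feature extractor and
# merges the resulting dicts, so a joint call yields all four keys checked above.
# inputs = processor(audio=np.ones([12000]), images=np.ones([3, 224, 224]))
# list(inputs.keys())  -> ['audio_values', 'audio_mask', 'pixel_values', 'pixel_mask']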
import argparse
import intel_extension_for_pytorch as ipex
import torch
from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline
parser = argparse.ArgumentParser("Stable Diffusion script with intel optimization", add_help=False)
parser.add_argument("--dpm", action="store_true", help="Enable DPMSolver or not")
parser.add_argument("--steps", default=None, type=int, help="Num inference steps")
args = parser.parse_args()
device = "cpu"
prompt = "a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings"
model_id = "path-to-your-trained-model"
pipe = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
    pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to(device)
# to channels last
pipe.unet = pipe.unet.to(memory_format=torch.channels_last)
pipe.vae = pipe.vae.to(memory_format=torch.channels_last)
pipe.text_encoder = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
    pipe.safety_checker = pipe.safety_checker.to(memory_format=torch.channels_last)
# optimize with ipex
sample = torch.randn(2, 4, 64, 64)
timestep = torch.rand(1) * 999
encoder_hidden_status = torch.randn(2, 77, 768)
input_example = (sample, timestep, encoder_hidden_status)
try:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True, sample_input=input_example)
except Exception:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True)
pipe.vae = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloat16, inplace=True)
pipe.text_encoder = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloat16, inplace=True)
if pipe.requires_safety_checker:
    pipe.safety_checker = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloat16, inplace=True)
# compute
seed = 666
generator = torch.Generator(device).manual_seed(seed)
generate_kwargs = {"generator": generator}
if args.steps is not None:
    generate_kwargs["num_inference_steps"] = args.steps
with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
    image = pipe(prompt, **generate_kwargs).images[0]
# save image
image.save("generated.png")
| 352 |
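# The channels-last conversion applied above, shown on a standalone module: the
# memory format changes only the tensor layout, not the values. Shapes here are
# illustrative.
import torch

conv = torch.nn.Conv2d(3, 8, 3).to(memory_format=torch.channels_last)
x = torch.randn(1, 3, 32, 32).to(memory_format=torch.channels_last)
y = conv(x)
print(y.is_contiguous(memory_format=torch.channels_last))  # True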
import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen, xsplitext
from ..table import array_cast
from ..utils.py_utils import no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
from .features import FeatureType
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = False, False, False
@dataclass
class a_ :
UpperCamelCase__ : Optional[int] =None
UpperCamelCase__ : bool =True
UpperCamelCase__ : bool =True
UpperCamelCase__ : Optional[str] =None
# Automatically constructed
UpperCamelCase__ : ClassVar[str] ="dict"
UpperCamelCase__ : ClassVar[Any] =pa.struct({"bytes": pa.binary(), "path": pa.string()} )
UpperCamelCase__ : str =field(default="Audio" , init=_snake_case , repr=_snake_case )
def __call__( self :List[Any]) -> List[Any]:
return self.pa_type
def __a ( self :Any , _lowercase :Union[str, bytes, dict]) -> dict:
try:
import soundfile as sf # soundfile is a dependency of librosa, needed to decode audio files.
except ImportError as err:
raise ImportError('''To support encoding audio data, please install \'soundfile\'.''') from err
if isinstance(_lowercase , _lowercase):
return {"bytes": None, "path": value}
elif isinstance(_lowercase , _lowercase):
return {"bytes": value, "path": None}
elif "array" in value:
# convert the audio array to wav bytes
UpperCAmelCase_ = BytesIO()
sf.write(_lowercase , value['''array'''] , value['''sampling_rate'''] , format='''wav''')
return {"bytes": buffer.getvalue(), "path": None}
elif value.get('''path''') is not None and os.path.isfile(value['''path''']):
# we set "bytes": None to not duplicate the data if they're already available locally
if value["path"].endswith('''pcm'''):
# "PCM" only has raw audio bytes
if value.get('''sampling_rate''') is None:
# At least, If you want to convert "PCM-byte" to "WAV-byte", you have to know sampling rate
raise KeyError('''To use PCM files, please specify a \'sampling_rate\' in Audio object''')
if value.get('''bytes'''):
# If we already had PCM-byte, we don`t have to make "read file, make bytes" (just use it!)
                    UpperCAmelCase_ = np.frombuffer(value['''bytes'''] , dtype=np.int16).astype(np.float32) / 32767
                else:
                    UpperCAmelCase_ = np.memmap(value['''path'''] , dtype='''h''' , mode='''r''').astype(np.float32) / 32767
UpperCAmelCase_ = BytesIO(bytes())
sf.write(_lowercase , _lowercase , value['''sampling_rate'''] , format='''wav''')
return {"bytes": buffer.getvalue(), "path": None}
else:
return {"bytes": None, "path": value.get('''path''')}
elif value.get('''bytes''') is not None or value.get('''path''') is not None:
# store the audio bytes, and path is used to infer the audio format using the file extension
return {"bytes": value.get('''bytes'''), "path": value.get('''path''')}
else:
raise ValueError(
f"An audio sample should have one of 'path' or 'bytes' but they are missing or None in {value}.")
def __a ( self :Dict , _lowercase :dict , _lowercase :Optional[Dict[str, Union[str, bool, None]]] = None) -> dict:
if not self.decode:
raise RuntimeError('''Decoding is disabled for this feature. Please use Audio(decode=True) instead.''')
UpperCAmelCase_ , UpperCAmelCase_ = (value['''path'''], BytesIO(value['''bytes'''])) if value['''bytes'''] is not None else (value['''path'''], None)
if path is None and file is None:
raise ValueError(f"An audio sample should have one of 'path' or 'bytes' but both are None in {value}.")
try:
import librosa
import soundfile as sf
except ImportError as err:
raise ImportError('''To support decoding audio files, please install \'librosa\' and \'soundfile\'.''') from err
UpperCAmelCase_ = xsplitext(_lowercase)[1][1:].lower() if path is not None else None
if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
raise RuntimeError(
'''Decoding \'opus\' files requires system library \'libsndfile\'>=1.0.31, '''
'''You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. ''')
elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
raise RuntimeError(
'''Decoding \'mp3\' files requires system library \'libsndfile\'>=1.1.0, '''
'''You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. ''')
if file is None:
UpperCAmelCase_ = token_per_repo_id or {}
UpperCAmelCase_ = path.split('''::''')[-1]
try:
UpperCAmelCase_ = string_to_dict(_lowercase , config.HUB_DATASETS_URL)['''repo_id''']
UpperCAmelCase_ = token_per_repo_id[repo_id]
except (ValueError, KeyError):
UpperCAmelCase_ = None
with xopen(_lowercase , '''rb''' , use_auth_token=_lowercase) as f:
UpperCAmelCase_ , UpperCAmelCase_ = sf.read(_lowercase)
else:
UpperCAmelCase_ , UpperCAmelCase_ = sf.read(_lowercase)
UpperCAmelCase_ = array.T
if self.mono:
UpperCAmelCase_ = librosa.to_mono(_lowercase)
if self.sampling_rate and self.sampling_rate != sampling_rate:
UpperCAmelCase_ = librosa.resample(_lowercase , orig_sr=_lowercase , target_sr=self.sampling_rate)
UpperCAmelCase_ = self.sampling_rate
return {"path": path, "array": array, "sampling_rate": sampling_rate}
def __a ( self :Union[str, Any]) -> Union["FeatureType", Dict[str, "FeatureType"]]:
from .features import Value
if self.decode:
raise ValueError('''Cannot flatten a decoded Audio feature.''')
return {
"bytes": Value('''binary'''),
"path": Value('''string'''),
}
def __a ( self :int , _lowercase :Union[pa.StringArray, pa.StructArray]) -> pa.StructArray:
if pa.types.is_string(storage.type):
UpperCAmelCase_ = pa.array([None] * len(_lowercase) , type=pa.binary())
UpperCAmelCase_ = pa.StructArray.from_arrays([bytes_array, storage] , ['''bytes''', '''path'''] , mask=storage.is_null())
elif pa.types.is_binary(storage.type):
UpperCAmelCase_ = pa.array([None] * len(_lowercase) , type=pa.string())
UpperCAmelCase_ = pa.StructArray.from_arrays([storage, path_array] , ['''bytes''', '''path'''] , mask=storage.is_null())
elif pa.types.is_struct(storage.type) and storage.type.get_all_field_indices('''array'''):
UpperCAmelCase_ = pa.array([Audio().encode_example(_lowercase) if x is not None else None for x in storage.to_pylist()])
elif pa.types.is_struct(storage.type):
if storage.type.get_field_index('''bytes''') >= 0:
UpperCAmelCase_ = storage.field('''bytes''')
else:
UpperCAmelCase_ = pa.array([None] * len(_lowercase) , type=pa.binary())
if storage.type.get_field_index('''path''') >= 0:
UpperCAmelCase_ = storage.field('''path''')
else:
UpperCAmelCase_ = pa.array([None] * len(_lowercase) , type=pa.string())
UpperCAmelCase_ = pa.StructArray.from_arrays([bytes_array, path_array] , ['''bytes''', '''path'''] , mask=storage.is_null())
return array_cast(_lowercase , self.pa_type)
def __a ( self :Any , _lowercase :pa.StructArray) -> pa.StructArray:
@no_op_if_value_is_null
def path_to_bytes(_lowercase :Tuple):
with xopen(_lowercase , '''rb''') as f:
UpperCAmelCase_ = f.read()
return bytes_
UpperCAmelCase_ = pa.array(
[
(path_to_bytes(x['''path''']) if x['''bytes'''] is None else x['''bytes''']) if x is not None else None
for x in storage.to_pylist()
] , type=pa.binary() , )
UpperCAmelCase_ = pa.array(
[os.path.basename(_lowercase) if path is not None else None for path in storage.field('''path''').to_pylist()] , type=pa.string() , )
UpperCAmelCase_ = pa.StructArray.from_arrays([bytes_array, path_array] , ['''bytes''', '''path'''] , mask=bytes_array.is_null())
return array_cast(_lowercase , self.pa_type)
| 344 | 0 |
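# Usage sketch for the encode path of the Audio feature above: an in-memory array
# plus sampling rate is serialized to WAV bytes via soundfile. Values illustrative.
import numpy as np

example = {"array": np.zeros(16000, dtype=np.float32), "sampling_rate": 16000}
# Audio().encode_example(example)  -> {"bytes": b"RIFF...", "path": None}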
"""simple docstring"""
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
__A = logging.get_logger(__name__)
__A = {
"b0": efficientnet.EfficientNetBa,
"b1": efficientnet.EfficientNetBa,
"b2": efficientnet.EfficientNetBa,
"b3": efficientnet.EfficientNetBa,
"b4": efficientnet.EfficientNetBa,
"b5": efficientnet.EfficientNetBa,
"b6": efficientnet.EfficientNetBa,
"b7": efficientnet.EfficientNetBa,
}
__A = {
"b0": {
"hidden_dim": 1_2_8_0,
"width_coef": 1.0,
"depth_coef": 1.0,
"image_size": 2_2_4,
"dropout_rate": 0.2,
"dw_padding": [],
},
"b1": {
"hidden_dim": 1_2_8_0,
"width_coef": 1.0,
"depth_coef": 1.1,
"image_size": 2_4_0,
"dropout_rate": 0.2,
"dw_padding": [1_6],
},
"b2": {
"hidden_dim": 1_4_0_8,
"width_coef": 1.1,
"depth_coef": 1.2,
"image_size": 2_6_0,
"dropout_rate": 0.3,
"dw_padding": [5, 8, 1_6],
},
"b3": {
"hidden_dim": 1_5_3_6,
"width_coef": 1.2,
"depth_coef": 1.4,
"image_size": 3_0_0,
"dropout_rate": 0.3,
"dw_padding": [5, 1_8],
},
"b4": {
"hidden_dim": 1_7_9_2,
"width_coef": 1.4,
"depth_coef": 1.8,
"image_size": 3_8_0,
"dropout_rate": 0.4,
"dw_padding": [6],
},
"b5": {
"hidden_dim": 2_0_4_8,
"width_coef": 1.6,
"depth_coef": 2.2,
"image_size": 4_5_6,
"dropout_rate": 0.4,
"dw_padding": [1_3, 2_7],
},
"b6": {
"hidden_dim": 2_3_0_4,
"width_coef": 1.8,
"depth_coef": 2.6,
"image_size": 5_2_8,
"dropout_rate": 0.5,
"dw_padding": [3_1],
},
"b7": {
"hidden_dim": 2_5_6_0,
"width_coef": 2.0,
"depth_coef": 3.1,
"image_size": 6_0_0,
"dropout_rate": 0.5,
"dw_padding": [1_8],
},
}
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ) -> str:
lowercase__: str = EfficientNetConfig()
lowercase__: int = CONFIG_MAP[model_name]['''hidden_dim''']
lowercase__: List[Any] = CONFIG_MAP[model_name]['''width_coef''']
lowercase__: Any = CONFIG_MAP[model_name]['''depth_coef''']
lowercase__: Dict = CONFIG_MAP[model_name]['''image_size''']
lowercase__: Union[str, Any] = CONFIG_MAP[model_name]['''dropout_rate''']
lowercase__: Optional[int] = CONFIG_MAP[model_name]['''dw_padding''']
lowercase__: Dict = '''huggingface/label-files'''
lowercase__: int = '''imagenet-1k-id2label.json'''
lowercase__: Optional[int] = 1_0_0_0
lowercase__: Tuple = json.load(open(hf_hub_download(__UpperCAmelCase , __UpperCAmelCase , repo_type='''dataset''' ) , '''r''' ) )
lowercase__: List[Any] = {int(__UpperCAmelCase ): v for k, v in idalabel.items()}
lowercase__: Any = idalabel
lowercase__: Union[str, Any] = {v: k for k, v in idalabel.items()}
return config
def SCREAMING_SNAKE_CASE__ ( ) -> Union[str, Any]:
lowercase__: str = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
lowercase__: List[Any] = Image.open(requests.get(__UpperCAmelCase , stream=__UpperCAmelCase ).raw )
return im
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ) -> Union[str, Any]:
lowercase__: Optional[Any] = CONFIG_MAP[model_name]['''image_size''']
lowercase__: str = EfficientNetImageProcessor(
size={'''height''': size, '''width''': size} , image_mean=[0.4_8_5, 0.4_5_6, 0.4_0_6] , image_std=[0.4_7_8_5_3_9_4_4, 0.4_7_3_2_8_6_4, 0.4_7_4_3_4_1_6_3] , do_center_crop=__UpperCAmelCase , )
return preprocessor
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ) -> Tuple:
lowercase__: str = [v.split('''_''' )[0].split('''block''' )[1] for v in original_param_names if v.startswith('''block''' )]
lowercase__: str = sorted(set(__UpperCAmelCase ) )
lowercase__: Dict = len(__UpperCAmelCase )
lowercase__: List[Any] = {b: str(__UpperCAmelCase ) for b, i in zip(__UpperCAmelCase , range(__UpperCAmelCase ) )}
lowercase__: Optional[int] = []
rename_keys.append(('''stem_conv/kernel:0''', '''embeddings.convolution.weight''') )
rename_keys.append(('''stem_bn/gamma:0''', '''embeddings.batchnorm.weight''') )
rename_keys.append(('''stem_bn/beta:0''', '''embeddings.batchnorm.bias''') )
rename_keys.append(('''stem_bn/moving_mean:0''', '''embeddings.batchnorm.running_mean''') )
rename_keys.append(('''stem_bn/moving_variance:0''', '''embeddings.batchnorm.running_var''') )
for b in block_names:
lowercase__: int = block_name_mapping[b]
rename_keys.append((F"""block{b}_expand_conv/kernel:0""", F"""encoder.blocks.{hf_b}.expansion.expand_conv.weight""") )
rename_keys.append((F"""block{b}_expand_bn/gamma:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.weight""") )
rename_keys.append((F"""block{b}_expand_bn/beta:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.bias""") )
rename_keys.append(
(F"""block{b}_expand_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.running_mean""") )
rename_keys.append(
(F"""block{b}_expand_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.running_var""") )
rename_keys.append(
(F"""block{b}_dwconv/depthwise_kernel:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight""") )
rename_keys.append((F"""block{b}_bn/gamma:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight""") )
rename_keys.append((F"""block{b}_bn/beta:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias""") )
rename_keys.append(
(F"""block{b}_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean""") )
rename_keys.append(
(F"""block{b}_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var""") )
rename_keys.append((F"""block{b}_se_reduce/kernel:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.reduce.weight""") )
rename_keys.append((F"""block{b}_se_reduce/bias:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.reduce.bias""") )
rename_keys.append((F"""block{b}_se_expand/kernel:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.expand.weight""") )
rename_keys.append((F"""block{b}_se_expand/bias:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.expand.bias""") )
rename_keys.append(
(F"""block{b}_project_conv/kernel:0""", F"""encoder.blocks.{hf_b}.projection.project_conv.weight""") )
rename_keys.append((F"""block{b}_project_bn/gamma:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.weight""") )
rename_keys.append((F"""block{b}_project_bn/beta:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.bias""") )
rename_keys.append(
(F"""block{b}_project_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.running_mean""") )
rename_keys.append(
(F"""block{b}_project_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.running_var""") )
rename_keys.append(('''top_conv/kernel:0''', '''encoder.top_conv.weight''') )
rename_keys.append(('''top_bn/gamma:0''', '''encoder.top_bn.weight''') )
rename_keys.append(('''top_bn/beta:0''', '''encoder.top_bn.bias''') )
rename_keys.append(('''top_bn/moving_mean:0''', '''encoder.top_bn.running_mean''') )
rename_keys.append(('''top_bn/moving_variance:0''', '''encoder.top_bn.running_var''') )
lowercase__: Tuple = {}
for item in rename_keys:
if item[0] in original_param_names:
lowercase__: Any = '''efficientnet.''' + item[1]
lowercase__: Any = '''classifier.weight'''
lowercase__: Optional[Any] = '''classifier.bias'''
return key_mapping
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Any:
for key, value in tf_params.items():
if "normalization" in key:
continue
lowercase__: Any = key_mapping[key]
if "_conv" in key and "kernel" in key:
lowercase__: Optional[int] = torch.from_numpy(__UpperCAmelCase ).permute(3 , 2 , 0 , 1 )
elif "depthwise_kernel" in key:
lowercase__: Optional[Any] = torch.from_numpy(__UpperCAmelCase ).permute(2 , 3 , 0 , 1 )
elif "kernel" in key:
lowercase__: List[str] = torch.from_numpy(np.transpose(__UpperCAmelCase ) )
else:
lowercase__: Any = torch.from_numpy(__UpperCAmelCase )
# Replace HF parameters with original TF model parameters
assert hf_params[hf_key].shape == new_hf_value.shape
hf_params[hf_key].copy_(__UpperCAmelCase )
@torch.no_grad()
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> List[Any]:
lowercase__: Optional[Any] = model_classes[model_name](
include_top=__UpperCAmelCase , weights='''imagenet''' , input_tensor=__UpperCAmelCase , input_shape=__UpperCAmelCase , pooling=__UpperCAmelCase , classes=1_0_0_0 , classifier_activation='''softmax''' , )
lowercase__: Union[str, Any] = original_model.trainable_variables
lowercase__: Union[str, Any] = original_model.non_trainable_variables
lowercase__: Union[str, Any] = {param.name: param.numpy() for param in tf_params}
for param in tf_non_train_params:
lowercase__: Any = param.numpy()
lowercase__: str = list(tf_params.keys() )
# Load HuggingFace model
lowercase__: int = get_efficientnet_config(__UpperCAmelCase )
lowercase__: Optional[int] = EfficientNetForImageClassification(__UpperCAmelCase ).eval()
lowercase__: Any = hf_model.state_dict()
# Create src-to-dst parameter name mapping dictionary
print('''Converting parameters...''' )
lowercase__: List[str] = rename_keys(__UpperCAmelCase )
replace_params(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
# Initialize preprocessor and preprocess input image
lowercase__: Union[str, Any] = convert_image_processor(__UpperCAmelCase )
lowercase__: List[Any] = preprocessor(images=prepare_img() , return_tensors='''pt''' )
# HF model inference
hf_model.eval()
with torch.no_grad():
lowercase__: Optional[int] = hf_model(**__UpperCAmelCase )
lowercase__: str = outputs.logits.detach().numpy()
# Original model inference
lowercase__: Tuple = False
lowercase__: Optional[Any] = CONFIG_MAP[model_name]['''image_size''']
lowercase__: List[str] = prepare_img().resize((image_size, image_size) , resample=PIL.Image.NEAREST )
lowercase__: str = image.img_to_array(__UpperCAmelCase )
lowercase__: List[Any] = np.expand_dims(__UpperCAmelCase , axis=0 )
lowercase__: str = original_model.predict(__UpperCAmelCase )
# Check whether original and HF model outputs match -> np.allclose
assert np.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1e-3 ), "The predicted logits are not the same."
print('''Model outputs match!''' )
if save_model:
# Create folder to save model
if not os.path.isdir(__UpperCAmelCase ):
os.mkdir(__UpperCAmelCase )
# Save converted model and image processor
hf_model.save_pretrained(__UpperCAmelCase )
preprocessor.save_pretrained(__UpperCAmelCase )
if push_to_hub:
# Push model and image processor to hub
print(F"""Pushing converted {model_name} to the hub...""" )
lowercase__: List[Any] = F"""efficientnet-{model_name}"""
preprocessor.push_to_hub(__UpperCAmelCase )
hf_model.push_to_hub(__UpperCAmelCase )
if __name__ == "__main__":
__A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="b0",
type=str,
help="Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default="hf_model",
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument("--save_model", action="store_true", help="Save model to local")
parser.add_argument("--push_to_hub", action="store_true", help="Push model and image processor to the hub")
    args = parser.parse_args()
convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
| 177 | """simple docstring"""
from collections.abc import Sequence
def max_subarray_sum(arr: Sequence[float], allow_empty_subarrays: bool = False) -> float:
    """Return the largest sum over all contiguous subarrays of `arr` (Kadane's algorithm)."""
    if not arr:
        return 0
    max_sum = 0 if allow_empty_subarrays else float("-inf")
    curr_sum = 0.0
    for num in arr:
        # Either extend the current subarray or start a new one at `num`
        curr_sum = max(0 if allow_empty_subarrays else num, curr_sum + num)
        max_sum = max(max_sum, curr_sum)
    return max_sum
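# Hedged worked example (added for illustration): for the classic test array,
# the best subarray is [4, -1, 2, 1] with sum 6; with empty subarrays allowed,
# an all-negative input yields 0.
def _demo_max_subarray_sum():
    assert max_subarray_sum([-2, 1, -3, 4, -1, 2, 1, -5, 4]) == 6
    assert max_subarray_sum([-1, -2], allow_empty_subarrays=True) == 0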
if __name__ == "__main__":
from doctest import testmod
testmod()
    nums = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
print(f'''{max_subarray_sum(nums) = }''')
| 177 | 1 |
from collections.abc import Generator
def fibonacci_generator() -> Generator[int, None, None]:
    a, b = 0, 1
    while True:
        a, b = b, a + b
        yield b


def solution(n: int = 1000) -> int:
    """Return the index of the first Fibonacci term to contain `n` digits."""
    answer = 1
    gen = fibonacci_generator()
    while len(str(next(gen))) < n:
        answer += 1
    return answer + 1
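# Hedged sanity check (added for illustration): the first Fibonacci term with
# three digits is F(12) = 144; for the default n = 1000 this is Project Euler 25.
def _demo_solution():
    assert solution(3) == 12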
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 269 |
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""allenai/longformer-base-4096""": """https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json""",
"""allenai/longformer-large-4096""": (
"""https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json"""
),
"""allenai/longformer-large-4096-finetuned-triviaqa""": (
"""https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json"""
),
"""allenai/longformer-base-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json"""
),
"""allenai/longformer-large-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json"""
),
},
"""merges_file""": {
"""allenai/longformer-base-4096""": """https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt""",
"""allenai/longformer-large-4096""": (
"""https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt"""
),
"""allenai/longformer-large-4096-finetuned-triviaqa""": (
"""https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt"""
),
"""allenai/longformer-base-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt"""
),
"""allenai/longformer-large-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""allenai/longformer-base-4096""": 4_096,
"""allenai/longformer-large-4096""": 4_096,
"""allenai/longformer-large-4096-finetuned-triviaqa""": 4_096,
"""allenai/longformer-base-4096-extra.pos.embd.only""": 4_096,
"""allenai/longformer-large-4096-extra.pos.embd.only""": 4_096,
}
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
@lru_cache()
def bytes_to_unicode():
    """
    Returns a mapping from utf-8 bytes to unicode strings. Printable bytes map
    to themselves; control and whitespace bytes are shifted past 2**8 so the
    BPE vocabulary never contains raw whitespace or control characters.
    """
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
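# Hedged illustration (added): printable ASCII maps to itself, while the space
# byte is remapped to "Ġ" (U+0120), the familiar GPT-2/RoBERTa space marker.
def _demo_bytes_to_unicode():
    byte_encoder = bytes_to_unicode()
    assert byte_encoder[ord("A")] == "A"
    assert byte_encoder[ord(" ")] == "Ġ"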
def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word, given as a tuple of symbols."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
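# Hedged illustration (added): adjacent-symbol pairs for the word "hello".
def _demo_get_pairs():
    assert get_pairs(tuple("hello")) == {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}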
class LongformerTokenizer(PreTrainedTokenizer):
    """Constructs a Longformer tokenizer, using the same byte-level BPE scheme as GPT-2/RoBERTa."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )
        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+")

    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)
        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j
                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )
        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1
        return vocab_file, merge_file

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs) | 269 | 1 |
| 269 | 1 |
# limitations under the License.
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
deprecate(
'pipelines_utils',
'0.22.0',
'Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.',
standard_warn=False,
stacklevel=3,
)
| 48 |
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_mmbt": ["MMBTConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mmbt"] = ["MMBTForClassification", "MMBTModel", "ModalEmbeddings"]
if TYPE_CHECKING:
from .configuration_mmbt import MMBTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
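    # Hedged note (added): with _LazyModule installed in sys.modules, submodules
    # are imported only on first attribute access, so importing MMBTConfig from
    # this package does not pull in the torch-backed modeling_mmbt module.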
| 48 | 1 |
def cocktail_shaker_sort(unsorted: list) -> list:
    """Sort a list in place by alternating backward and forward bubble passes."""
    for i in range(len(unsorted) - 1, 0, -1):
        swapped = False
        # Backward pass: bubble the smallest remaining element toward the front
        for j in range(i, 0, -1):
            if unsorted[j] < unsorted[j - 1]:
                unsorted[j], unsorted[j - 1] = unsorted[j - 1], unsorted[j]
                swapped = True
        # Forward pass: bubble the largest remaining element toward the back
        for j in range(i):
            if unsorted[j] > unsorted[j + 1]:
                unsorted[j], unsorted[j + 1] = unsorted[j + 1], unsorted[j]
                swapped = True
        if not swapped:
            break
    return unsorted
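# Hedged worked example (added for illustration):
def _demo_cocktail_shaker_sort():
    assert cocktail_shaker_sort([4, 5, 2, 1, 2]) == [1, 2, 2, 4, 5]
    assert cocktail_shaker_sort([-4, 5, 0, 1, 2, 11]) == [-4, 0, 1, 2, 5, 11]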
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(F"""{cocktail_shaker_sort(unsorted) = }""") | 235 |
from dataclasses import dataclass, field
from typing import Optional
@dataclass
class TrainingArguments:
    """Configuration for training a CodeParrot model."""

    model_ckpt: Optional[str] = field(
        default="codeparrot/codeparrot", metadata={"help": "Model name or path of model to be trained."}
    )
    save_dir: Optional[str] = field(
        default="./", metadata={"help": "Save dir where model repo is cloned and models updates are saved to."}
    )
    dataset_name_train: Optional[str] = field(
        default="codeparrot/codeparrot-clean-train", metadata={"help": "Name or path of training dataset."}
    )
    dataset_name_valid: Optional[str] = field(
        default="codeparrot/codeparrot-clean-valid", metadata={"help": "Name or path of validation dataset."}
    )
    train_batch_size: Optional[int] = field(default=2, metadata={"help": "Batch size for training."})
    valid_batch_size: Optional[int] = field(default=2, metadata={"help": "Batch size for evaluation."})
    weight_decay: Optional[float] = field(default=0.1, metadata={"help": "Value of weight decay."})
    shuffle_buffer: Optional[int] = field(
        default=10000, metadata={"help": "Size of buffer used to shuffle streaming dataset."}
    )
    learning_rate: Optional[float] = field(default=2e-4, metadata={"help": "Learning rate for training."})
    lr_scheduler_type: Optional[str] = field(default="cosine", metadata={"help": "Learning rate schedule type."})
    num_warmup_steps: Optional[int] = field(
        default=750, metadata={"help": "Number of warmup steps in the learning rate schedule."}
    )
    gradient_accumulation_steps: Optional[int] = field(
        default=16, metadata={"help": "Number of gradient accumulation steps."}
    )
    gradient_checkpointing: Optional[bool] = field(
        default=True, metadata={"help": "Use gradient checkpointing to reduce memory footprint."}
    )
    max_train_steps: Optional[int] = field(default=50000, metadata={"help": "Maximum number of training steps."})
    max_eval_steps: Optional[int] = field(
        default=-1, metadata={"help": "Maximum number of evaluation steps. If -1 the full dataset is evaluated."}
    )
    seq_length: Optional[int] = field(default=1024, metadata={"help": "Sequence lengths used for training."})
    seed: Optional[int] = field(default=1, metadata={"help": "Training seed."})
    save_checkpoint_steps: Optional[int] = field(
        default=1024,
        metadata={"help": "Interval to save checkpoints. Measured as number of forward passes not training steps."},
    )
    resume_from_checkpoint: Optional[str] = field(
        default=None, metadata={"help": "States path if the training should continue from a checkpoint folder."}
    )
    tokenized: Optional[bool] = field(default=False, metadata={"help": "If True the data is pretokenized."})


@dataclass
class EvaluationArguments:
    """Configuration for evaluating a CodeParrot model."""

    model_ckpt: Optional[str] = field(
        default="codeparrot/codeparrot", metadata={"help": "Model name or path of model to be evaluated."}
    )
    dataset_name: Optional[str] = field(
        default="codeparrot/codeparrot-clean-valid", metadata={"help": "Name or path of validation dataset."}
    )
    batch_size: Optional[int] = field(default=2, metadata={"help": "Batch size used for evaluation."})
    max_eval_steps: Optional[int] = field(
        default=-1, metadata={"help": "Maximum number of evaluation steps. If -1 the full dataset is evaluated."}
    )
    seq_length: Optional[int] = field(default=1024, metadata={"help": "Length of sequences to be evaluated."})
    seed: Optional[int] = field(default=1, metadata={"help": "Random seed used for evaluation."})


@dataclass
class HumanEvalArguments:
    """Configuration for running human-eval on a CodeParrot model."""

    model_ckpt: Optional[str] = field(
        default="codeparrot/codeparrot", metadata={"help": "Model name or path of model to be evaluated."}
    )
    num_workers: Optional[int] = field(default=None, metadata={"help": "Number of workers used for code evaluation."})
    num_tasks: Optional[int] = field(
        default=None,
        metadata={"help": "The number of human-eval tasks to run. If not included all tasks are evaluated."},
    )
    do_sample: Optional[bool] = field(
        default=True, metadata={"help": "Sample from the language model's output distribution."}
    )
    temperature: Optional[float] = field(default=0.2, metadata={"help": "Sampling temperature used for generation."})
    max_new_tokens: Optional[int] = field(default=256, metadata={"help": "Maximum number of newly generated tokens."})
    top_k: Optional[int] = field(default=0, metadata={"help": "Top-k parameter used for generation."})
    top_p: Optional[float] = field(default=0.95, metadata={"help": "Top-p parameter used for nucleus sampling."})
    batch_size: Optional[int] = field(default=10, metadata={"help": "Number of generations to run in parallel."})
    n_samples: Optional[int] = field(
        default=200, metadata={"help": "Number of completions to generate for each sample."}
    )
    seed: Optional[int] = field(default=1, metadata={"help": "Random seed used for evaluation."})
    output_file: Optional[str] = field(
        default="eval_results.json", metadata={"help": "File where the evaluation results are written."}
    )
    HF_ALLOW_CODE_EVAL: Optional[str] = field(
        default="0", metadata={"help": "Allow `code_eval` to execute Python code on machine"}
    )
    device_int: Optional[int] = field(
        default=-1,
        metadata={
            "help": (
                "Determine which device to run the `text-generation` Pipeline on. -1 is CPU and any zero or positive"
                " number corresponds to which GPU device id to run on."
            )
        },
    )


@dataclass
class PreprocessingArguments:
    """Configuration for preprocessing the raw dataset."""

    num_workers: Optional[int] = field(
        default=None,
        metadata={
            "help": "The number of CPU cores to use for parallel preprocessing. Default uses the maximum available."
        },
    )
    dataset_name: Optional[str] = field(
        default="transformersbook/codeparrot", metadata={"help": "Folder or name of dataset to process."}
    )
    output_dir: Optional[str] = field(
        default="codeparrot-clean", metadata={"help": "Folder to save processed dataset."}
    )
    samples_per_file: Optional[int] = field(
        default=100000, metadata={"help": "Number of files to save per JSON output file."}
    )
    text_column: Optional[str] = field(default="content", metadata={"help": "Column containing text data to process."})
    line_max: Optional[float] = field(
        default=1000, metadata={"help": "Maximum line length in file, otherwise file is filtered."}
    )
    line_mean: Optional[float] = field(
        default=100, metadata={"help": "Maximum mean line length in file, otherwise file is filtered."}
    )
    alpha_frac: Optional[float] = field(
        default=0.25, metadata={"help": "Maximum fraction of non-alphanumeric characters, otherwise file is filtered."}
    )
    min_token_ratio: Optional[float] = field(
        default=1.5, metadata={"help": "Minimum character token ratio for the file, otherwise file is filtered."}
    )
    filter_proba: Optional[float] = field(
        default=0.7, metadata={"help": "Probability for filtering config, test and uncommon files."}
    )
    tokenizer: Optional[str] = field(
        default="codeparrot/codeparrot", metadata={"help": "Name or path to the tokenizer."}
    )
    near_deduplication: Optional[bool] = field(
        default=False, metadata={"help": "If True, near-duplicate samples are removed."}
    )
    jaccard_threshold: Optional[float] = field(
        default=0.85, metadata={"help": "Jaccard threshold for near-duplicate samples."}
    )


@dataclass
class TokenizerTrainingArguments:
    """Configuration for training a new BPE tokenizer."""

    base_tokenizer: Optional[str] = field(
        default="gpt2", metadata={"help": "Base tokenizer to build new tokenizer from."}
    )
    dataset_name: Optional[str] = field(
        default="transformersbook/codeparrot-train", metadata={"help": "Dataset to train tokenizer on."}
    )
    text_column: Optional[str] = field(default="content", metadata={"help": "Column containing text data to process."})
    n_examples: Optional[int] = field(default=200000, metadata={"help": "Number of examples to train tokenizer on."})
    vocab_size: Optional[int] = field(default=32768, metadata={"help": "Vocabulary size of the new tokenizer."})
    tokenizer_name: Optional[str] = field(default="codeparrot", metadata={"help": "Name of new tokenizer."})
    push_to_hub: Optional[bool] = field(default=True, metadata={"help": "Push saved tokenizer to the hub."})


@dataclass
class PretokenizationArguments:
    """Configuration for pretokenizing the dataset."""

    tokenizer_dir: Optional[str] = field(
        default="codeparrot/codeparrot", metadata={"help": "Name or path to the tokenizer."}
    )
    dataset_name: Optional[str] = field(
        default="codeparrot/codeparrot-clean-train", metadata={"help": "Name or path to the dataset to pretokenize."}
    )
    tokenized_data_repo: Optional[str] = field(
        default="tokenized-codeparrot-train", metadata={"help": "Repo name of the pretokenized data."}
    )
    num_workers: Optional[int] = field(default=None, metadata={"help": "Number of workers used for pretokenization."})


@dataclass
class InitializationArguments:
    """Configuration for initializing a new model."""

    config_name: Optional[str] = field(
        default="gpt2-large", metadata={"help": "Configuration to use for model initialization."}
    )
    tokenizer_name: Optional[str] = field(
        default="codeparrot/codeparrot", metadata={"help": "Tokenizer attached to model."}
    )
    model_name: Optional[str] = field(default="codeparrot", metadata={"help": "Name of the created model."})
    push_to_hub: Optional[bool] = field(default=True, metadata={"help": "Push saved model to the hub."}) | 235 | 1 |
'''simple docstring'''
import copy
from typing import Dict, List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'facebook/mask2former-swin-small-coco-instance': (
'https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json'
)
# See all Mask2Former models at https://huggingface.co/models?filter=mask2former
}
logger = logging.get_logger(__name__)
class Mask2FormerConfig(PretrainedConfig):
    model_type = "mask2former"
    backbones_supported = ["swin"]
    attribute_map = {"hidden_size": "hidden_dim"}

    def __init__(
        self,
        backbone_config: Optional[Dict] = None,
        feature_size: int = 256,
        mask_feature_size: int = 256,
        hidden_dim: int = 256,
        encoder_feedforward_dim: int = 1024,
        activation_function: str = "relu",
        encoder_layers: int = 6,
        decoder_layers: int = 10,
        num_attention_heads: int = 8,
        dropout: float = 0.0,
        dim_feedforward: int = 2048,
        pre_norm: bool = False,
        enforce_input_projection: bool = False,
        common_stride: int = 4,
        ignore_value: int = 255,
        num_queries: int = 100,
        no_object_weight: float = 0.1,
        class_weight: float = 2.0,
        mask_weight: float = 5.0,
        dice_weight: float = 5.0,
        train_num_points: int = 12544,
        oversample_ratio: float = 3.0,
        importance_sample_ratio: float = 0.75,
        init_std: float = 0.02,
        init_xavier_std: float = 1.0,
        use_auxiliary_loss: bool = True,
        feature_strides: List[int] = [4, 8, 16, 32],
        output_auxiliary_logits: bool = None,
        **kwargs,
    ):
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `Swin` backbone.")
            backbone_config = CONFIG_MAPPING["swin"](
                image_size=224,
                in_channels=3,
                patch_size=4,
                embed_dim=96,
                depths=[2, 2, 18, 2],
                num_heads=[3, 6, 12, 24],
                window_size=7,
                drop_path_rate=0.3,
                use_absolute_embeddings=False,
                out_features=["stage1", "stage2", "stage3", "stage4"],
            )
        if isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.pop("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)
        # verify that the backbone is supported
        if backbone_config.model_type not in self.backbones_supported:
            logger.warning_once(
                f"Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. "
                f"Supported model types: {','.join(self.backbones_supported)}"
            )
        self.backbone_config = backbone_config
        self.feature_size = feature_size
        self.mask_feature_size = mask_feature_size
        self.hidden_dim = hidden_dim
        self.encoder_feedforward_dim = encoder_feedforward_dim
        self.activation_function = activation_function
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.num_attention_heads = num_attention_heads
        self.dropout = dropout
        self.dim_feedforward = dim_feedforward
        self.pre_norm = pre_norm
        self.enforce_input_projection = enforce_input_projection
        self.common_stride = common_stride
        self.ignore_value = ignore_value
        self.num_queries = num_queries
        self.no_object_weight = no_object_weight
        self.class_weight = class_weight
        self.mask_weight = mask_weight
        self.dice_weight = dice_weight
        self.train_num_points = train_num_points
        self.oversample_ratio = oversample_ratio
        self.importance_sample_ratio = importance_sample_ratio
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.use_auxiliary_loss = use_auxiliary_loss
        self.feature_strides = feature_strides
        self.output_auxiliary_logits = output_auxiliary_logits
        self.num_hidden_layers = decoder_layers
        super().__init__(**kwargs)

    @classmethod
    def from_backbone_config(cls, backbone_config: PretrainedConfig, **kwargs):
        return cls(backbone_config=backbone_config, **kwargs)

    def to_dict(self) -> Dict[str, any]:
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
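# Hedged usage sketch (added): instantiating with no arguments builds the
# default Swin backbone; individual hyperparameters can be overridden, e.g.
#     config = Mask2FormerConfig(num_queries=200)
#     assert config.backbone_config.model_type == "swin"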
| 158 |
"""simple docstring"""
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class ViltProcessor(ProcessorMixin):
    """Constructs a ViLT processor which wraps an image processor and a BERT tokenizer."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ViltImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        encoding = self.tokenizer(
            text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation,
            max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping, return_length=return_length,
            verbose=verbose, return_tensors=return_tensors, **kwargs,
        )
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)
        encoding.update(encoding_image_processor)
        return encoding

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor | 286 | 0 |
from __future__ import annotations
from math import pi
def ind_reactance(inductance: float, frequency: float, reactance: float) -> dict[str, float]:
    """
    Calculate inductive reactance, frequency or inductance from two of the
    three electrical properties; pass exactly one argument as 0 to solve for it.
    """
    if (inductance, frequency, reactance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if inductance < 0:
        raise ValueError("Inductance cannot be negative")
    if frequency < 0:
        raise ValueError("Frequency cannot be negative")
    if reactance < 0:
        raise ValueError("Inductive reactance cannot be negative")
    if inductance == 0:
        return {"inductance": reactance / (2 * pi * frequency)}
    elif frequency == 0:
        return {"frequency": reactance / (2 * pi * inductance)}
    elif reactance == 0:
        return {"reactance": 2 * pi * frequency * inductance}
    else:
        raise ValueError("Exactly one argument must be 0")
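# Hedged worked example (added for illustration): with L = 35 mH and f = 1 kHz,
# X_L = 2 * pi * f * L ≈ 219.91 ohms.
def _demo_ind_reactance():
    result = ind_reactance(0.035, 1000, 0)
    assert abs(result["reactance"] - 219.911) < 1e-2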
if __name__ == "__main__":
import doctest
doctest.testmod()
| 51 |
from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class TextDatasetReader(AbstractDatasetReader):
    def __init__(
        self,
        path_or_paths: NestedDataStructureLike[PathLike],
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            path_or_paths, split=split, features=features, cache_dir=cache_dir,
            keep_in_memory=keep_in_memory, streaming=streaming, num_proc=num_proc, **kwargs,
        )
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Text(cache_dir=cache_dir, data_files=path_or_paths, features=features, **kwargs)

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config, download_mode=download_mode,
                verification_mode=verification_mode, base_path=base_path, num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset
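# Hedged usage sketch (added), assuming a local plain-text file exists at the
# hypothetical path below:
#     reader = TextDatasetReader("data/corpus.txt", split="train")
#     dataset = reader.read()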
| 51 | 1 |
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class SparkDatasetReader(AbstractDatasetReader):
    """A dataset reader that reads from a Spark DataFrame."""

    def __init__(
        self,
        df: pyspark.sql.DataFrame,
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        streaming: bool = True,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        working_dir: str = None,
        load_from_cache_file: bool = True,
        file_format: str = "arrow",
        **kwargs,
    ):
        super().__init__(
            split=split, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory,
            streaming=streaming, **kwargs,
        )
        self._load_from_cache_file = load_from_cache_file
        self._file_format = file_format
        self.builder = Spark(df=df, features=features, cache_dir=cache_dir, working_dir=working_dir, **kwargs)

    def read(self):
        if self.streaming:
            return self.builder.as_streaming_dataset(split=self.split)
        download_mode = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
        self.builder.download_and_prepare(download_mode=download_mode, file_format=self._file_format)
        return self.builder.as_dataset(split=self.split)
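# Hedged usage sketch (added), assuming an active SparkSession named `spark`:
#     df = spark.createDataFrame([("hello",), ("world",)], ["text"])
#     dataset = SparkDatasetReader(df, cache_dir="/tmp/spark_cache").read()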
| 110 |
import json
import os
import torch
from diffusers import UNet1DModel
os.makedirs("""hub/hopper-medium-v2/unet/hor32""", exist_ok=True)
os.makedirs("""hub/hopper-medium-v2/unet/hor128""", exist_ok=True)
os.makedirs("""hub/hopper-medium-v2/value_function""", exist_ok=True)
def unet(hor):
    if hor == 128:
        down_block_types = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D")
        block_out_channels = (32, 128, 256)
        up_block_types = ("UpResnetBlock1D", "UpResnetBlock1D")
    elif hor == 32:
        down_block_types = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D")
        block_out_channels = (32, 64, 128, 256)
        up_block_types = ("UpResnetBlock1D", "UpResnetBlock1D", "UpResnetBlock1D")
    model = torch.load(f"/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch")
    state_dict = model.state_dict()
    config = {
        "down_block_types": down_block_types,
        "block_out_channels": block_out_channels,
        "up_block_types": up_block_types,
        "layers_per_block": 1,
        "use_timestep_embedding": True,
        "out_block_type": "OutConv1DBlock",
        "norm_num_groups": 8,
        "downsample_each_block": False,
        "in_channels": 14,
        "out_channels": 14,
        "extra_in_channels": 0,
        "time_embedding_type": "positional",
        "flip_sin_to_cos": False,
        "freq_shift": 1,
        "sample_size": 65536,
        "mid_block_type": "MidResTemporalBlock1D",
        "act_fn": "mish",
    }
    hf_value_function = UNet1DModel(**config)
    print(f"length of state dict: {len(state_dict.keys())}")
    print(f"length of value function dict: {len(hf_value_function.state_dict().keys())}")
    # The two state dicts enumerate parameters in the same order, so a simple
    # positional zip yields the old-name -> new-name mapping
    mapping = dict(zip(model.state_dict().keys(), hf_value_function.state_dict().keys()))
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k)
    hf_value_function.load_state_dict(state_dict)
    torch.save(hf_value_function.state_dict(), f"hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin")
    with open(f"hub/hopper-medium-v2/unet/hor{hor}/config.json", "w") as f:
        json.dump(config, f)
def value_function():
    config = {
        "in_channels": 14,
        "down_block_types": ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D"),
        "up_block_types": (),
        "out_block_type": "ValueFunction",
        "mid_block_type": "ValueFunctionMidBlock1D",
        "block_out_channels": (32, 64, 128, 256),
        "layers_per_block": 1,
        "downsample_each_block": True,
        "sample_size": 65536,
        "out_channels": 14,
        "extra_in_channels": 0,
        "time_embedding_type": "positional",
        "use_timestep_embedding": True,
        "flip_sin_to_cos": False,
        "freq_shift": 1,
        "norm_num_groups": 8,
        "act_fn": "mish",
    }
    # Here torch.load returns a raw state dict rather than a module
    model = torch.load("/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch")
    state_dict = model
    hf_value_function = UNet1DModel(**config)
    print(f"length of state dict: {len(state_dict.keys())}")
    print(f"length of value function dict: {len(hf_value_function.state_dict().keys())}")
    mapping = dict(zip(state_dict.keys(), hf_value_function.state_dict().keys()))
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k)
    hf_value_function.load_state_dict(state_dict)
    torch.save(hf_value_function.state_dict(), "hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin")
    with open("hub/hopper-medium-v2/value_function/config.json", "w") as f:
        json.dump(config, f)
if __name__ == "__main__":
unet(32)
# unet(128)
value_function()
| 92 | 0 |
'''simple docstring'''
def is_ip_v4_address_valid(ip_v4_address: str) -> bool:
    """A valid IPv4 address here is four dot-separated decimal octets, each in the range 0-254."""
    octets = [int(i) for i in ip_v4_address.split(".") if i.isdigit()]
    return len(octets) == 4 and all(0 <= octet <= 254 for octet in octets)
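# Hedged examples (added for illustration):
def _demo_is_ip_v4_address_valid():
    assert is_ip_v4_address_valid("192.168.0.23")
    assert not is_ip_v4_address_valid("192.256.15.8")  # 256 is out of range
    assert not is_ip_v4_address_valid("172.100.0")  # only three octets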
if __name__ == "__main__":
    ip = input().strip()
    valid_or_invalid = "valid" if is_ip_v4_address_valid(ip) else "invalid"
print(f'{ip} is a {valid_or_invalid} IP v4 address.')
| 358 |
'''simple docstring'''
from __future__ import absolute_import, division, print_function, unicode_literals
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta.modeling_roberta import (
ROBERTA_INPUTS_DOCSTRING,
ROBERTA_START_DOCSTRING,
RobertaEmbeddings,
)
from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy
@add_start_docstrings(
    "The RoBERTa Model transformer with early exiting (DeeRoBERTa). ",
    ROBERTA_START_DOCSTRING,
)
class DeeRobertaModel(DeeBertModel):
    config_class = RobertaConfig
    base_model_prefix = "roberta"

    def __init__(self, config):
        super().__init__(config)
        self.embeddings = RobertaEmbeddings(config)
        self.init_weights()


@add_start_docstrings(
    "RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,\n"
    "also takes care of multi-layer training. ",
    ROBERTA_START_DOCSTRING,
)
class DeeRobertaForSequenceClassification(BertPreTrainedModel):
    config_class = RobertaConfig
    base_model_prefix = "roberta"

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.num_layers = config.num_hidden_layers
        self.roberta = DeeRobertaModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)

    @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_layer=-1,
        train_highway=False,
    ):
        exit_layer = self.num_layers
        try:
            outputs = self.roberta(
                input_ids,
                attention_mask=attention_mask,
                token_type_ids=token_type_ids,
                position_ids=position_ids,
                head_mask=head_mask,
                inputs_embeds=inputs_embeds,
            )
            pooled_output = outputs[1]
            pooled_output = self.dropout(pooled_output)
            logits = self.classifier(pooled_output)
            outputs = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        except HighwayException as e:
            outputs = e.message
            exit_layer = e.exit_layer
            logits = outputs[0]

        if not self.training:
            original_entropy = entropy(logits)
            highway_entropy = []
            highway_logits_all = []
        if labels is not None:
            if self.num_labels == 1:
                # We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

            # work with highway exits
            highway_losses = []
            for highway_exit in outputs[-1]:
                highway_logits = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(highway_logits)
                    highway_entropy.append(highway_exit[2])
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    highway_loss = loss_fct(highway_logits.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    highway_loss = loss_fct(highway_logits.view(-1, self.num_labels), labels.view(-1))
                highway_losses.append(highway_loss)

            if train_highway:
                # exclude the final highway, of course
                outputs = (sum(highway_losses[:-1]),) + outputs
            else:
                outputs = (loss,) + outputs
        if not self.training:
            outputs = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                outputs = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                )  # use the highway of the last layer

        return outputs  # (loss), logits, (hidden_states), (attentions), entropy
| 52 | 0 |
import json
import os
from typing import Dict, List, Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
'''vocab_file''': '''vocab.json''',
'''merges_file''': '''merges.txt''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'''
},
'''merges_file''': {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'''
},
'''tokenizer_config_file''': {
'''facebook/blenderbot_small-90M''': (
'''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'''
)
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/blenderbot_small-90M": 512}
def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word, given as a tuple of symbols."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return set(pairs)
class BlenderbotSmallTokenizer(PreTrainedTokenizer):
    """Constructs a Blenderbot-90M tokenizer based on BPE (Byte-Pair-Encoding)."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        bos_token="__start__",
        eos_token="__end__",
        unk_token="__unk__",
        pad_token="__null__",
        **kwargs,
    ):
        super().__init__(unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, **kwargs)
        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            merges = merges_handle.read().split("\n")[1:-1]
        merges = [tuple(merge.split()) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}

    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token: str) -> str:
        if token in self.cache:
            return self.cache[token]
        # Split punctuation and apostrophes off, collapse whitespace, mark newlines
        token = re.sub("([.,!?()])", r" \1", token)
        token = re.sub("(')", r" \1 ", token)
        token = re.sub(r"\s{2,}", " ", token)
        if "\n" in token:
            token = token.replace("\n", " __newln__")

        tokens = token.split(" ")
        words = []
        for token in tokens:
            if not len(token):
                continue

            token = token.lower()
            word = tuple(token)
            word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
            pairs = get_pairs(word)

            if not pairs:
                words.append(token)
                continue

            while True:
                bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
                if bigram not in self.bpe_ranks:
                    break
                first, second = bigram
                new_word = []
                i = 0
                while i < len(word):
                    try:
                        j = word.index(first, i)
                        new_word.extend(word[i:j])
                        i = j
                    except ValueError:
                        new_word.extend(word[i:])
                        break
                    if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                        new_word.append(first + second)
                        i += 2
                    else:
                        new_word.append(word[i])
                        i += 1
                new_word = tuple(new_word)
                word = new_word
                if len(word) == 1:
                    break
                else:
                    pairs = get_pairs(word)
            word = "@@ ".join(word)
            word = word[:-4]
            self.cache[token] = word
            words.append(word)
        return " ".join(words)

    def _tokenize(self, text: str) -> List[str]:
        split_tokens = []
        words = re.findall(r"\S+\n?", text)
        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens

    def _convert_token_to_id(self, token: str) -> int:
        token = token.lower()
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index: int) -> str:
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )
        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1
        return vocab_file, merge_file | 280 |
| 280 |
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from torch import nn
from transformers import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_inverse_sqrt_schedule,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
def unwrap_schedule(scheduler, num_steps=10):
    lrs = []
    for _ in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
    return lrs


def unwrap_and_save_reload_schedule(scheduler, num_steps=10):
    lrs = []
    for step in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
        if step == num_steps // 2:
            with tempfile.TemporaryDirectory() as tmpdirname:
                file_name = os.path.join(tmpdirname, "schedule.bin")
                torch.save(scheduler.state_dict(), file_name)

                state_dict = torch.load(file_name)
                scheduler.load_state_dict(state_dict)
    return lrs


@require_torch
class OptimizationTest(unittest.TestCase):
    def assertListAlmostEqual(self, list1, list2, tol):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol)

    def test_adam_w(self):
        w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True)
        target = torch.tensor([0.4, 0.2, -0.5])
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = AdamW(params=[w], lr=2e-1, weight_decay=0.0)
        for _ in range(100):
            # calculate loss and update parameters
            loss = criterion(w, target)
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1e-2)

    def test_adafactor(self):
        w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True)
        target = torch.tensor([0.4, 0.2, -0.5])
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = Adafactor(
            params=[w],
            lr=1e-2,
            eps=(1e-30, 1e-3),
            clip_threshold=1.0,
            decay_rate=-0.8,
            beta1=None,
            weight_decay=0.0,
            relative_step=False,
            scale_parameter=False,
            warmup_init=False,
        )
        for _ in range(1000):
            # calculate loss and update parameters
            loss = criterion(w, target)
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1e-2)


@require_torch
class ScheduleInitTest(unittest.TestCase):
    m = nn.Linear(50, 50) if is_torch_available() else None
    optimizer = AdamW(m.parameters(), lr=10.0) if is_torch_available() else None
    num_steps = 10

    def assertListAlmostEqual(self, list1, list2, tol, msg=None):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol, msg=msg)

    def test_schedulers(self):
        common_kwargs = {"num_warmup_steps": 2, "num_training_steps": 10}
        # schedulers dict format
        # function: (sched_args_dict, expected_learning_rates)
        scheds = {
            get_constant_schedule: ({}, [10.0] * self.num_steps),
            get_constant_schedule_with_warmup: (
                {"num_warmup_steps": 4},
                [0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
            ),
            get_linear_schedule_with_warmup: (
                {**common_kwargs},
                [0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
            ),
            get_cosine_schedule_with_warmup: (
                {**common_kwargs},
                [0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
            ),
            get_cosine_with_hard_restarts_schedule_with_warmup: (
                {**common_kwargs, "num_cycles": 2},
                [0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
            ),
            get_polynomial_decay_schedule_with_warmup: (
                {**common_kwargs, "power": 2.0, "lr_end": 1e-7},
                [0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156],
            ),
            get_inverse_sqrt_schedule: (
                {"num_warmup_steps": 2},
                [0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714],
            ),
        }

        for scheduler_func, data in scheds.items():
            kwargs, expected_learning_rates = data

            scheduler = scheduler_func(self.optimizer, **kwargs)
            self.assertEqual(len([scheduler.get_lr()[0]]), 1)
            lrs_1 = unwrap_schedule(scheduler, self.num_steps)
            self.assertListAlmostEqual(
                lrs_1,
                expected_learning_rates,
                tol=1e-2,
                msg=f"failed for {scheduler_func} in normal scheduler",
            )

            scheduler = scheduler_func(self.optimizer, **kwargs)
            if scheduler_func.__name__ != "get_constant_schedule":
                LambdaScheduleWrapper.wrap_scheduler(scheduler)  # wrap to test picklability of the schedule
            lrs_2 = unwrap_and_save_reload_schedule(scheduler, self.num_steps)
            self.assertListEqual(lrs_1, lrs_2, msg=f"failed for {scheduler_func} in save and reload")


class LambdaScheduleWrapper:
    """Wraps each lr lambda in a picklable callable so the schedule state survives save/reload."""

    def __init__(self, fn):
        self.fn = fn

    def __call__(self, *args, **kwargs):
        return self.fn(*args, **kwargs)

    @classmethod
    def wrap_scheduler(cls, scheduler):
        scheduler.lr_lambdas = list(map(cls, scheduler.lr_lambdas))
| 329 | 0 |
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class __snake_case :
def __init__( self : Union[str, Any] , _snake_case : List[Any] , _snake_case : List[str]=13 , _snake_case : Any=7 , _snake_case : Union[str, Any]=True , _snake_case : Any=True , _snake_case : Dict=True , _snake_case : Dict=True , _snake_case : List[str]=99 , _snake_case : Dict=16 , _snake_case : Tuple=36 , _snake_case : int=6 , _snake_case : Tuple=6 , _snake_case : Any=6 , _snake_case : Any=37 , _snake_case : Tuple="gelu" , _snake_case : Optional[int]=0.1 , _snake_case : Union[str, Any]=0.1 , _snake_case : Optional[int]=512 , _snake_case : Union[str, Any]=16 , _snake_case : Any=2 , _snake_case : List[Any]=0.0_2 , _snake_case : Dict=3 , _snake_case : List[Any]=4 , _snake_case : List[str]=None , ):
"""simple docstring"""
UpperCAmelCase_ = parent
UpperCAmelCase_ = batch_size
UpperCAmelCase_ = seq_length
UpperCAmelCase_ = is_training
UpperCAmelCase_ = use_input_mask
UpperCAmelCase_ = use_token_type_ids
UpperCAmelCase_ = use_labels
UpperCAmelCase_ = vocab_size
UpperCAmelCase_ = embedding_size
UpperCAmelCase_ = hidden_size
UpperCAmelCase_ = num_hidden_layers
UpperCAmelCase_ = num_hidden_groups
UpperCAmelCase_ = num_attention_heads
UpperCAmelCase_ = intermediate_size
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = hidden_dropout_prob
UpperCAmelCase_ = attention_probs_dropout_prob
UpperCAmelCase_ = max_position_embeddings
UpperCAmelCase_ = type_vocab_size
UpperCAmelCase_ = type_sequence_label_size
UpperCAmelCase_ = initializer_range
UpperCAmelCase_ = num_labels
UpperCAmelCase_ = num_choices
UpperCAmelCase_ = scope
def lowerCamelCase ( self : Any):
"""simple docstring"""
UpperCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
UpperCAmelCase_ = None
if self.use_input_mask:
UpperCAmelCase_ = random_attention_mask([self.batch_size, self.seq_length])
UpperCAmelCase_ = None
if self.use_token_type_ids:
UpperCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
UpperCAmelCase_ = None
UpperCAmelCase_ = None
UpperCAmelCase_ = None
if self.use_labels:
UpperCAmelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size)
UpperCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
UpperCAmelCase_ = ids_tensor([self.batch_size] , self.num_choices)
UpperCAmelCase_ = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCamelCase ( self : int):
"""simple docstring"""
return AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , )
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = AlbertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_pretraining(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = AlbertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            sentence_order_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.sop_logits.shape, (self.batch_size, config.num_labels))

    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = AlbertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = AlbertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = AlbertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = AlbertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = AlbertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class AlbertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            AlbertModel,
            AlbertForPreTraining,
            AlbertForMaskedLM,
            AlbertForMultipleChoice,
            AlbertForSequenceClassification,
            AlbertForTokenClassification,
            AlbertForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": AlbertModel,
            "fill-mask": AlbertForMaskedLM,
            "question-answering": AlbertForQuestionAnswering,
            "text-classification": AlbertForSequenceClassification,
            "token-classification": AlbertForTokenClassification,
            "zero-shot": AlbertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["sentence_order_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = AlbertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=AlbertConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = AlbertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class AlbertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = AlbertModel.from_pretrained("albert-base-v2")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
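# Illustrative sketch (added for clarity, not part of the original test suite):
# the multiple-choice checks above tile each (batch, seq) tensor to
# (batch, num_choices, seq) before feeding AlbertForMultipleChoice. Uncalled;
# the shapes below are toy values.
def _sketch_multiple_choice_expansion():
    x = torch.arange(6).reshape(2, 3)  # (batch=2, seq=3)
    tiled = x.unsqueeze(1).expand(-1, 4, -1).contiguous()  # num_choices=4
    assert tiled.shape == (2, 4, 3)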
| 370 |
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class DPMSolverSinglestepSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DPMSolverSinglestepScheduler,)
    forward_default_kwargs = (("num_inference_steps", 25),)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "solver_order": 2,
            "prediction_type": "epsilon",
            "thresholding": False,
            "sample_max_value": 1.0,
            "algorithm_type": "dpmsolver++",
            "solver_type": "midpoint",
            "lambda_min_clipped": -float("inf"),
            "variance_type": None,
        }
        config.update(**kwargs)
        return config
    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output, new_output = sample, sample
            for t in range(time_step, time_step + scheduler.config.solver_order + 1):
                output = scheduler.step(residual, t, output, **kwargs).prev_sample
                new_output = new_scheduler.step(residual, t, new_output, **kwargs).prev_sample

                assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def test_from_save_pretrained(self):
        # covered by check_over_configs above; the common-suite version is skipped
        pass
    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals (must be after setting timesteps)
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def full_loop(self, scheduler=None, **config):
        if scheduler is None:
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample
    def test_full_uneven_loop(self):
        scheduler = DPMSolverSinglestepScheduler(**self.get_scheduler_config())
        num_inference_steps = 50
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        # make sure that the first t is uneven
        for i, t in enumerate(scheduler.timesteps[3:]):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2574) < 1e-3

    def test_timesteps(self):
        for timesteps in [25, 50, 100, 999, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_switch(self):
        # make sure that iterating over schedulers with the same config gives the same results
        scheduler = DPMSolverSinglestepScheduler(**self.get_scheduler_config())
        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2791) < 1e-3

        scheduler = DEISMultistepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config)
        scheduler = UniPCMultistepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config)

        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2791) < 1e-3
    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for order in [1, 2, 3]:
            for solver_type in ["midpoint", "heun"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=True,
                            prediction_type=prediction_type,
                            sample_max_value=threshold,
                            algorithm_type="dpmsolver++",
                            solver_order=order,
                            solver_type=solver_type,
                        )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_solver_order_and_type(self):
        for algorithm_type in ["dpmsolver", "dpmsolver++"]:
            for solver_type in ["midpoint", "heun"]:
                for order in [1, 2, 3]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            solver_order=order,
                            solver_type=solver_type,
                            prediction_type=prediction_type,
                            algorithm_type=algorithm_type,
                        )
                        sample = self.full_loop(
                            solver_order=order,
                            solver_type=solver_type,
                            prediction_type=prediction_type,
                            algorithm_type=algorithm_type,
                        )
                        assert not torch.isnan(sample).any(), "Samples have nan numbers"

    def test_lower_order_final(self):
        self.check_over_configs(lower_order_final=True)
        self.check_over_configs(lower_order_final=False)

    def test_lambda_min_clipped(self):
        self.check_over_configs(lambda_min_clipped=-float("inf"))
        self.check_over_configs(lambda_min_clipped=-5.1)

    def test_variance_type(self):
        self.check_over_configs(variance_type=None)
        self.check_over_configs(variance_type="learned_range")

    def test_inference_steps(self):
        for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=0)
    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2791) < 1e-3

    def test_full_loop_with_karras(self):
        sample = self.full_loop(use_karras_sigmas=True)
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2248) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.1453) < 1e-3

    def test_full_loop_with_karras_and_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction", use_karras_sigmas=True)
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.0649) < 1e-3

    def test_fp16_support(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(thresholding=True, dynamic_thresholding_ratio=0)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter.half()
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        assert sample.dtype == torch.float16
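# Illustrative sketch (added for clarity, not part of the original tests): the
# save_config/from_pretrained round-trip the checks above depend on. Uncalled.
def _sketch_config_roundtrip():
    scheduler = DPMSolverSinglestepScheduler(num_train_timesteps=1000)
    with tempfile.TemporaryDirectory() as tmpdirname:
        scheduler.save_config(tmpdirname)
        reloaded = DPMSolverSinglestepScheduler.from_pretrained(tmpdirname)
    assert reloaded.config.num_train_timesteps == 1000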
| 7 | 0 |
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, Swinv2Config, Swinv2ForImageClassification
def get_swinv2_config(swinv2_name):
    config = Swinv2Config()
    name_split = swinv2_name.split("_")

    model_size = name_split[1]
    if "to" in name_split[3]:
        img_size = int(name_split[3][-3:])
    else:
        img_size = int(name_split[3])
    if "to" in name_split[2]:
        window_size = int(name_split[2][-2:])
    else:
        window_size = int(name_split[2][6:])

    if model_size == "tiny":
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "small":
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "base":
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    else:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)

    if "to" in swinv2_name:
        config.pretrained_window_sizes = (12, 12, 12, 6)

    if ("22k" in swinv2_name) and ("to" not in swinv2_name):
        num_classes = 21841
        repo_id = "huggingface/label-files"
        filename = "imagenet-22k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    else:
        num_classes = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    config.image_size = img_size
    config.num_labels = num_classes
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads
    config.window_size = window_size

    return config
def rename_key(name):
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "q_bias" in name:
        name = name.replace("q_bias", "query.bias")
    if "k_bias" in name:
        name = name.replace("k_bias", "key.bias")
    if "v_bias" in name:
        name = name.replace("v_bias", "value.bias")
    if "cpb_mlp" in name:
        name = name.replace("cpb_mlp", "continuous_position_bias_mlp")
    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"

    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "swinv2." + name

    return name
def convert_state_dict(orig_state_dict, model):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "mask" in key:
            continue
        elif "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[1])
            block_num = int(key_split[3])
            dim = model.swinv2.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size

            # fused qkv tensors are split into equal query/key/value slices
            block_prefix = f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self"
            if "weight" in key:
                orig_state_dict[f"{block_prefix}.query.weight"] = val[:dim, :]
                orig_state_dict[f"{block_prefix}.key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{block_prefix}.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"{block_prefix}.query.bias"] = val[:dim]
                orig_state_dict[f"{block_prefix}.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{block_prefix}.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def convert_swinv2_checkpoint(swinv2_name, pytorch_dump_folder_path):
    timm_model = timm.create_model(swinv2_name, pretrained=True)
    timm_model.eval()

    config = get_swinv2_config(swinv2_name)
    model = Swinv2ForImageClassification(config)
    model.eval()

    new_state_dict = convert_state_dict(timm_model.state_dict(), model)
    model.load_state_dict(new_state_dict)

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image_processor = AutoImageProcessor.from_pretrained("microsoft/{}".format(swinv2_name.replace("_", "-")))
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors="pt")

    timm_outs = timm_model(inputs["pixel_values"])
    hf_outs = model(**inputs).logits

    assert torch.allclose(timm_outs, hf_outs, atol=1e-3)

    print(f"Saving model {swinv2_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    model.push_to_hub(
        repo_path_or_name=Path(pytorch_dump_folder_path, swinv2_name),
        organization="nandwalritik",
        commit_message="Add model",
    )
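# Illustrative sketch (added for clarity, uncalled): how convert_state_dict
# splits a fused qkv projection into equal query/key/value slices. The toy
# dimension below is arbitrary.
def _sketch_qkv_split(dim=8):
    qkv_weight = torch.randn(3 * dim, dim)
    q = qkv_weight[:dim, :]
    k = qkv_weight[dim : dim * 2, :]
    v = qkv_weight[-dim:, :]
    assert q.shape == k.shape == v.shape == (dim, dim)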
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--swinv2_name",
default="swinv2_tiny_patch4_window8_256",
type=str,
help="Name of the Swinv2 timm model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
    args = parser.parse_args()
    convert_swinv2_checkpoint(args.swinv2_name, args.pytorch_dump_folder_path)
| 111 |
__all__ = [
"Audio",
"Array2D",
"Array3D",
"Array4D",
"Array5D",
"ClassLabel",
"Features",
"Sequence",
"Value",
"Image",
"Translation",
"TranslationVariableLanguages",
]
from .audio import Audio
from .features import Array2D, Array3D, Array4D, Array5D, ClassLabel, Features, Sequence, Value
from .image import Image
from .translation import Translation, TranslationVariableLanguages
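# Illustrative sketch (added for clarity, not part of the package API): a
# minimal schema built from the feature types re-exported above. Uncalled.
def _sketch_features_schema():
    return Features({"text": Value("string"), "label": ClassLabel(names=["neg", "pos"])})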
| 111 | 1 |
'''simple docstring'''
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class Edge:
    destination_vertex: int
    weight: int


class AdjacencyList:
    """Graph adjacency list for fast 0-1 BFS."""

    def __init__(self, size: int):
        self._graph: list[list[Edge]] = [[] for _ in range(size)]
        self._size = size

    def __getitem__(self, vertex: int) -> Iterator[Edge]:
        return iter(self._graph[vertex])

    @property
    def size(self) -> int:
        return self._size

    def add_edge(self, from_vertex: int, to_vertex: int, weight: int) -> None:
        if weight not in (0, 1):
            raise ValueError("Edge weight must be either 0 or 1.")
        if to_vertex < 0 or to_vertex >= self.size:
            raise ValueError("Vertex indexes must be in [0; size).")
        self._graph[from_vertex].append(Edge(to_vertex, weight))

    def get_shortest_path(self, start_vertex: int, finish_vertex: int) -> int | None:
        queue = deque([start_vertex])
        distances: list[int | None] = [None] * self.size
        distances[start_vertex] = 0

        while queue:
            current_vertex = queue.popleft()
            current_distance = distances[current_vertex]
            if current_distance is None:
                continue

            for edge in self[current_vertex]:
                new_distance = current_distance + edge.weight
                dest_vertex_distance = distances[edge.destination_vertex]
                if (
                    isinstance(dest_vertex_distance, int)
                    and new_distance >= dest_vertex_distance
                ):
                    continue
                distances[edge.destination_vertex] = new_distance
                # weight-0 edges go to the front of the deque, weight-1 edges to the back
                if edge.weight == 0:
                    queue.appendleft(edge.destination_vertex)
                else:
                    queue.append(edge.destination_vertex)

        if distances[finish_vertex] is None:
            raise ValueError("No path from start_vertex to finish_vertex.")

        return distances[finish_vertex]
if __name__ == "__main__":
import doctest
doctest.testmod()
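# Illustrative sketch (added for clarity, uncalled): a 5-vertex 0/1-weighted
# graph; the shortest 0 -> 4 path costs a single weight-1 edge. Values are
# arbitrary.
def _sketch_zero_one_bfs():
    graph = AdjacencyList(5)
    graph.add_edge(0, 1, 0)
    graph.add_edge(1, 2, 1)
    graph.add_edge(0, 3, 1)
    graph.add_edge(3, 4, 0)
    assert graph.get_shortest_path(0, 4) == 1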
| 179 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {"configuration_yolos": ["YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP", "YolosConfig", "YolosOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_yolos"] = ["YolosFeatureExtractor"]
    _import_structure["image_processing_yolos"] = ["YolosImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_yolos"] = [
'YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST',
'YolosForObjectDetection',
'YolosModel',
'YolosPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_yolos import YolosFeatureExtractor
from .image_processing_yolos import YolosImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_yolos import (
YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
YolosForObjectDetection,
YolosModel,
YolosPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 179 | 1 |
"""simple docstring"""
import importlib
import shutil
import threading
import warnings
from typing import List
import fsspec
import fsspec.asyn
from . import compression
from .hffilesystem import HfFileSystem
_has_s3fs = importlib.util.find_spec("s3fs") is not None

if _has_s3fs:
    from .s3filesystem import S3FileSystem  # noqa: F401
COMPRESSION_FILESYSTEMS: List[compression.BaseCompressedFileFileSystem] = [
    compression.Bz2FileSystem,
    compression.GzipFileSystem,
    compression.Lz4FileSystem,
    compression.XzFileSystem,
    compression.ZstdFileSystem,
]
# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
warnings.warn(f"""A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.""")
fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)
def extract_path_from_uri(dataset_path: str) -> str:
    if "://" in dataset_path:
        dataset_path = dataset_path.split("://")[1]
    return dataset_path


def is_remote_filesystem(fs: fsspec.AbstractFileSystem) -> bool:
    if fs is not None and fs.protocol != "file":
        return True
    else:
        return False


def rename(fs: fsspec.AbstractFileSystem, src: str, dst: str):
    is_local = not is_remote_filesystem(fs)
    if is_local:
        # LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
        shutil.move(fs._strip_protocol(src), fs._strip_protocol(dst))
    else:
        fs.mv(src, dst, recursive=True)


def _reset_fsspec_lock() -> None:
    # clear the cached event loop and IO thread, otherwise HTTPFileSystem can hang
    if hasattr(fsspec.asyn, "reset_lock"):
        # for future fsspec>2022.05.0
        fsspec.asyn.reset_lock()
    else:
        fsspec.asyn.iothread[0] = None
        fsspec.asyn.loop[0] = None
        fsspec.asyn.lock = threading.Lock()
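# Illustrative sketch (added for clarity, uncalled): extract_path_from_uri
# strips the protocol prefix and leaves plain paths unchanged.
def _sketch_extract_path():
    assert extract_path_from_uri("s3://bucket/data/train.csv") == "bucket/data/train.csv"
    assert extract_path_from_uri("local/data/train.csv") == "local/data/train.csv"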
| 109 |
from __future__ import annotations
def resistor_parallel(resistors: list[float]) -> float:
    """Equivalent resistance of resistors in parallel: 1 / (1/R1 + 1/R2 + ... + 1/Rn)."""
    first_sum = 0.00
    index = 0
    for resistor in resistors:
        if resistor <= 0:
            msg = f"Resistor at index {index} has a negative or zero value!"
            raise ValueError(msg)
        first_sum += 1 / float(resistor)
        index += 1
    return 1 / first_sum


def resistor_series(resistors: list[float]) -> float:
    """Equivalent resistance of resistors in series: R1 + R2 + ... + Rn."""
    sum_r = 0.00
    index = 0
    for resistor in resistors:
        sum_r += resistor
        if resistor < 0:
            msg = f"Resistor at index {index} has a negative value!"
            raise ValueError(msg)
        index += 1
    return sum_r
if __name__ == "__main__":
import doctest
doctest.testmod()
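# Illustrative usage sketch (added for clarity, uncalled; resistor values are
# arbitrary). Parallel: 1 / (1/10 + 1/20 + 1/20) = 5 ohms; series: 50 ohms.
def _sketch_resistors():
    assert abs(resistor_parallel([10.0, 20.0, 20.0]) - 5.0) < 1e-9
    assert resistor_series([10.0, 20.0, 20.0]) == 50.0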
| 9 | 0 |
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
import torch
import torchaudio.compliance.kaldi as ta_kaldi
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
logger = logging.get_logger(__name__)


class Speech2TextFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_features", "attention_mask"]

    def __init__(
        self,
        feature_size=80,
        sampling_rate=16000,
        num_mel_bins=80,
        padding_value=0.0,
        do_ceptral_normalize=True,
        normalize_means=True,
        normalize_vars=True,
        **kwargs,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.num_mel_bins = num_mel_bins
        self.do_ceptral_normalize = do_ceptral_normalize
        self.normalize_means = normalize_means
        self.normalize_vars = normalize_vars
        self.return_attention_mask = True

    def _extract_fbank_features(self, waveform: np.ndarray) -> np.ndarray:
        waveform = waveform * (2**15)  # Kaldi compliance: 16-bit signed integers
        waveform = torch.from_numpy(waveform).unsqueeze(0)
        features = ta_kaldi.fbank(waveform, num_mel_bins=self.num_mel_bins, sample_frequency=self.sampling_rate)
        return features.numpy()

    @staticmethod
    def utterance_cmvn(
        x: np.ndarray,
        input_length: int,
        normalize_means: bool = True,
        normalize_vars: bool = True,
        padding_value: float = 0.0,
    ) -> np.ndarray:
        # mean/variance statistics are computed over the valid (unpadded) frames only
        if normalize_means:
            mean = x[:input_length].mean(axis=0)
            x = np.subtract(x, mean)
        if normalize_vars:
            std = x[:input_length].std(axis=0)
            x = np.divide(x, std)

        if input_length < x.shape[0]:
            x[input_length:] = padding_value

        # make sure array is in float32
        x = x.astype(np.float32)
        return x

    def normalize(
        self, input_features: List[np.ndarray], attention_mask: Optional[np.ndarray] = None
    ) -> List[np.ndarray]:
        lengths = attention_mask.sum(-1) if attention_mask is not None else [x.shape[0] for x in input_features]
        return [
            self.utterance_cmvn(x, n, self.normalize_means, self.normalize_vars, self.padding_value)
            for x, n in zip(input_features, lengths)
        ]

    def __call__(
        self,
        raw_speech,
        padding=False,
        max_length=None,
        truncation=False,
        pad_to_multiple_of=None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        sampling_rate: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        **kwargs,
    ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float32) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_speech = [raw_speech]

        # extract fbank features
        features = [self._extract_fbank_features(waveform) for waveform in raw_speech]

        # convert into correct format for padding
        encoded_inputs = BatchFeature({"input_features": features})

        padded_inputs = self.pad(
            encoded_inputs,
            padding=padding,
            max_length=max_length,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )

        # make sure list is in array format
        input_features = padded_inputs.get("input_features")
        if isinstance(input_features[0], list):
            padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]

        attention_mask = padded_inputs.get("attention_mask")
        if attention_mask is not None:
            padded_inputs["attention_mask"] = [np.asarray(array, dtype=np.int32) for array in attention_mask]

        # Utterance-level cepstral mean and variance normalization
        if self.do_ceptral_normalize:
            attention_mask = (
                np.array(attention_mask, dtype=np.int32)
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                else None
            )
            padded_inputs["input_features"] = self.normalize(
                padded_inputs["input_features"], attention_mask=attention_mask
            )

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs
| 239 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/levit-128S": "https://huggingface.co/facebook/levit-128S/resolve/main/config.json",
    # See all LeViT models at https://huggingface.co/models?filter=levit
}


class LevitConfig(PretrainedConfig):
    model_type = "levit"

    def __init__(
        self,
        image_size=224,
        num_channels=3,
        kernel_size=3,
        stride=2,
        padding=1,
        patch_size=16,
        hidden_sizes=[128, 256, 384],
        num_attention_heads=[4, 8, 12],
        depths=[4, 4, 4],
        key_dim=[16, 16, 16],
        drop_path_rate=0,
        mlp_ratio=[2, 2, 2],
        attention_ratio=[2, 2, 2],
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.hidden_sizes = hidden_sizes
        self.num_attention_heads = num_attention_heads
        self.depths = depths
        self.key_dim = key_dim
        self.drop_path_rate = drop_path_rate
        self.patch_size = patch_size
        self.attention_ratio = attention_ratio
        self.mlp_ratio = mlp_ratio
        self.initializer_range = initializer_range
        self.down_ops = [
            ["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
            ["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
        ]


class LevitOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
| 239 | 1 |
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from .config import config_command_parser
from .config_args import default_config_file, load_config_from_file # noqa: F401
from .default import default_command_parser
from .update import update_command_parser
def get_config_parser(subparsers=None):
    parent_parser = argparse.ArgumentParser(add_help=False, allow_abbrev=False)
    # The main config parser
    config_parser = config_command_parser(subparsers)
    # The subparser to add commands to
    subcommands = config_parser.add_subparsers(title="subcommands", dest="subcommand")

    # Then add other parsers with the parent parser
    default_command_parser(subcommands, parents=[parent_parser])
    update_command_parser(subcommands, parents=[parent_parser])

    return config_parser


def main():
    config_parser = get_config_parser()
    args = config_parser.parse_args()

    if not hasattr(args, "func"):
        config_parser.print_help()
        exit(1)

    # Run
    args.func(args)
if __name__ == "__main__":
main()
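# Typical invocations handled by this entry point (hedged; subcommand names are
# inferred from the default/update parsers imported above):
#   accelerate config            -> interactive configuration questionnaire
#   accelerate config default    -> write a default config file
#   accelerate config update     -> migrate an existing config file in place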
| 46 |
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f'transformer.encoder.layers.{i}.self_attn.out_proj.weight', f'encoder.layers.{i}.self_attn.out_proj.weight')
)
rename_keys.append(
(f'transformer.encoder.layers.{i}.self_attn.out_proj.bias', f'encoder.layers.{i}.self_attn.out_proj.bias')
)
rename_keys.append((f'transformer.encoder.layers.{i}.linear1.weight', f'encoder.layers.{i}.fc1.weight'))
rename_keys.append((f'transformer.encoder.layers.{i}.linear1.bias', f'encoder.layers.{i}.fc1.bias'))
rename_keys.append((f'transformer.encoder.layers.{i}.linear2.weight', f'encoder.layers.{i}.fc2.weight'))
rename_keys.append((f'transformer.encoder.layers.{i}.linear2.bias', f'encoder.layers.{i}.fc2.bias'))
rename_keys.append(
(f'transformer.encoder.layers.{i}.norm1.weight', f'encoder.layers.{i}.self_attn_layer_norm.weight')
)
rename_keys.append((f'transformer.encoder.layers.{i}.norm1.bias', f'encoder.layers.{i}.self_attn_layer_norm.bias'))
rename_keys.append((f'transformer.encoder.layers.{i}.norm2.weight', f'encoder.layers.{i}.final_layer_norm.weight'))
rename_keys.append((f'transformer.encoder.layers.{i}.norm2.bias', f'encoder.layers.{i}.final_layer_norm.bias'))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(f'transformer.decoder.layers.{i}.self_attn.out_proj.weight', f'decoder.layers.{i}.self_attn.out_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.self_attn.out_proj.bias', f'decoder.layers.{i}.self_attn.out_proj.bias')
)
rename_keys.append(
(
f'transformer.decoder.layers.{i}.cross_attn.out_proj.weight',
f'decoder.layers.{i}.encoder_attn.out_proj.weight',
)
)
rename_keys.append(
(
f'transformer.decoder.layers.{i}.cross_attn.out_proj.bias',
f'decoder.layers.{i}.encoder_attn.out_proj.bias',
)
)
rename_keys.append((f'transformer.decoder.layers.{i}.linear1.weight', f'decoder.layers.{i}.fc1.weight'))
rename_keys.append((f'transformer.decoder.layers.{i}.linear1.bias', f'decoder.layers.{i}.fc1.bias'))
rename_keys.append((f'transformer.decoder.layers.{i}.linear2.weight', f'decoder.layers.{i}.fc2.weight'))
rename_keys.append((f'transformer.decoder.layers.{i}.linear2.bias', f'decoder.layers.{i}.fc2.bias'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm1.weight', f'decoder.layers.{i}.self_attn_layer_norm.weight')
)
rename_keys.append((f'transformer.decoder.layers.{i}.norm1.bias', f'decoder.layers.{i}.self_attn_layer_norm.bias'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm2.weight', f'decoder.layers.{i}.encoder_attn_layer_norm.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.norm2.bias', f'decoder.layers.{i}.encoder_attn_layer_norm.bias')
)
rename_keys.append((f'transformer.decoder.layers.{i}.norm3.weight', f'decoder.layers.{i}.final_layer_norm.weight'))
rename_keys.append((f'transformer.decoder.layers.{i}.norm3.bias', f'decoder.layers.{i}.final_layer_norm.bias'))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(f'transformer.decoder.layers.{i}.sa_qcontent_proj.weight', f'decoder.layers.{i}.sa_qcontent_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.sa_kcontent_proj.weight', f'decoder.layers.{i}.sa_kcontent_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.sa_qpos_proj.weight', f'decoder.layers.{i}.sa_qpos_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.sa_kpos_proj.weight', f'decoder.layers.{i}.sa_kpos_proj.weight')
)
rename_keys.append((f'transformer.decoder.layers.{i}.sa_v_proj.weight', f'decoder.layers.{i}.sa_v_proj.weight'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_qcontent_proj.weight', f'decoder.layers.{i}.ca_qcontent_proj.weight')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_kcontent_proj.weight', f'decoder.layers.{i}.ca_kcontent_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_kpos_proj.weight', f'decoder.layers.{i}.ca_kpos_proj.weight')
)
rename_keys.append((f'transformer.decoder.layers.{i}.ca_v_proj.weight', f'decoder.layers.{i}.ca_v_proj.weight'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight', f'decoder.layers.{i}.ca_qpos_sine_proj.weight')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.sa_qcontent_proj.bias', f'decoder.layers.{i}.sa_qcontent_proj.bias')
)
rename_keys.append(
(f'transformer.decoder.layers.{i}.sa_kcontent_proj.bias', f'decoder.layers.{i}.sa_kcontent_proj.bias')
)
rename_keys.append((f'transformer.decoder.layers.{i}.sa_qpos_proj.bias', f'decoder.layers.{i}.sa_qpos_proj.bias'))
rename_keys.append((f'transformer.decoder.layers.{i}.sa_kpos_proj.bias', f'decoder.layers.{i}.sa_kpos_proj.bias'))
rename_keys.append((f'transformer.decoder.layers.{i}.sa_v_proj.bias', f'decoder.layers.{i}.sa_v_proj.bias'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_qcontent_proj.bias', f'decoder.layers.{i}.ca_qcontent_proj.bias')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_kcontent_proj.bias', f'decoder.layers.{i}.ca_kcontent_proj.bias')
)
rename_keys.append((f'transformer.decoder.layers.{i}.ca_kpos_proj.bias', f'decoder.layers.{i}.ca_kpos_proj.bias'))
rename_keys.append((f'transformer.decoder.layers.{i}.ca_v_proj.bias', f'decoder.layers.{i}.ca_v_proj.bias'))
rename_keys.append(
(f'transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias', f'decoder.layers.{i}.ca_qpos_sine_proj.bias')
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
('input_proj.weight', 'input_projection.weight'),
('input_proj.bias', 'input_projection.bias'),
('query_embed.weight', 'query_position_embeddings.weight'),
('transformer.decoder.norm.weight', 'decoder.layernorm.weight'),
('transformer.decoder.norm.bias', 'decoder.layernorm.bias'),
('class_embed.weight', 'class_labels_classifier.weight'),
('class_embed.bias', 'class_labels_classifier.bias'),
('bbox_embed.layers.0.weight', 'bbox_predictor.layers.0.weight'),
('bbox_embed.layers.0.bias', 'bbox_predictor.layers.0.bias'),
('bbox_embed.layers.1.weight', 'bbox_predictor.layers.1.weight'),
('bbox_embed.layers.1.bias', 'bbox_predictor.layers.1.bias'),
('bbox_embed.layers.2.weight', 'bbox_predictor.layers.2.weight'),
('bbox_embed.layers.2.bias', 'bbox_predictor.layers.2.bias'),
('transformer.decoder.ref_point_head.layers.0.weight', 'decoder.ref_point_head.layers.0.weight'),
('transformer.decoder.ref_point_head.layers.0.bias', 'decoder.ref_point_head.layers.0.bias'),
('transformer.decoder.ref_point_head.layers.1.weight', 'decoder.ref_point_head.layers.1.weight'),
('transformer.decoder.ref_point_head.layers.1.bias', 'decoder.ref_point_head.layers.1.bias'),
('transformer.decoder.query_scale.layers.0.weight', 'decoder.query_scale.layers.0.weight'),
('transformer.decoder.query_scale.layers.0.bias', 'decoder.query_scale.layers.0.bias'),
('transformer.decoder.query_scale.layers.1.weight', 'decoder.query_scale.layers.1.weight'),
('transformer.decoder.query_scale.layers.1.bias', 'decoder.query_scale.layers.1.bias'),
('transformer.decoder.layers.0.ca_qpos_proj.weight', 'decoder.layers.0.ca_qpos_proj.weight'),
('transformer.decoder.layers.0.ca_qpos_proj.bias', 'decoder.layers.0.ca_qpos_proj.bias'),
]
)
def rename_key(state_dict, old, new):
    val = state_dict.pop(old)
    state_dict[new] = val


def rename_backbone_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value

    return new_state_dict


def read_in_q_k_v(state_dict, is_panoptic=False):
    prefix = ""
    if is_panoptic:
        prefix = "conditional_detr."

    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_conditional_detr_checkpoint(model_name, pytorch_dump_folder_path):
    # load default config
    config = ConditionalDetrConfig()
    # set backbone and dilation attributes
    if "resnet101" in model_name:
        config.backbone = "resnet101"
    if "dc5" in model_name:
        config.dilation = True
    is_panoptic = "panoptic" in model_name
    if is_panoptic:
        config.num_labels = 250
    else:
        config.num_labels = 91
        repo_id = "huggingface/label-files"
        filename = "coco-detection-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    # load image processor
    format = "coco_panoptic" if is_panoptic else "coco_detection"
    image_processor = ConditionalDetrImageProcessor(format=format)

    # prepare image
    img = prepare_img()
    encoding = image_processor(images=img, return_tensors="pt")
    pixel_values = encoding["pixel_values"]

    logger.info(f"Converting model {model_name}...")

    # load original model from torch hub
    conditional_detr = torch.hub.load("DeppMeng/ConditionalDETR", model_name, pretrained=True).eval()
    state_dict = conditional_detr.state_dict()
    # rename keys
    for src, dest in rename_keys:
        if is_panoptic:
            src = "conditional_detr." + src
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict, is_panoptic=is_panoptic)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "conditional_detr.model." if is_panoptic else "model."
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith("conditional_detr")
                and not key.startswith("class_labels_classifier")
                and not key.startswith("bbox_predictor")
            ):
                val = state_dict.pop(key)
                state_dict["conditional_detr.model" + key[4:]] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                val = state_dict.pop(key)
                state_dict["conditional_detr." + key] = val
            elif key.startswith("bbox_attention") or key.startswith("mask_head"):
                continue
            else:
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
        else:
            if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
    # finally, create HuggingFace model and load state dict
    model = ConditionalDetrForSegmentation(config) if is_panoptic else ConditionalDetrForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    model.push_to_hub(repo_id=model_name, organization="DepuMeng", commit_message="Add model")
    # verify our conversion
    original_outputs = conditional_detr(pixel_values)
    outputs = model(pixel_values)
    assert torch.allclose(outputs.logits, original_outputs["pred_logits"], atol=1e-4)
    assert torch.allclose(outputs.pred_boxes, original_outputs["pred_boxes"], atol=1e-4)
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks, original_outputs["pred_masks"], atol=1e-4)

    # Save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)
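# Illustrative sketch (added for clarity, uncalled): rename_key mutates the
# state dict in place, as used in the conversion loop above.
def _sketch_rename_key():
    sd = {"input_proj.weight": 0}
    rename_key(sd, "input_proj.weight", "input_projection.weight")
    assert "input_projection.weight" in sd and "input_proj.weight" not in sd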
if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--model_name",
        default="conditional_detr_resnet50",
        type=str,
        help="Name of the CONDITIONAL_DETR model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
    )
    args = parser.parse_args()
    convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 344 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_lxmert": ["LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "LxmertConfig"],
    "tokenization_lxmert": ["LxmertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_lxmert_fast"] = ["LxmertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_lxmert"] = [
'''LxmertEncoder''',
'''LxmertForPreTraining''',
'''LxmertForQuestionAnswering''',
'''LxmertModel''',
'''LxmertPreTrainedModel''',
'''LxmertVisualFeatureEncoder''',
'''LxmertXLayer''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_lxmert"] = [
'''TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFLxmertForPreTraining''',
'''TFLxmertMainLayer''',
'''TFLxmertModel''',
'''TFLxmertPreTrainedModel''',
'''TFLxmertVisualFeatureEncoder''',
]
if TYPE_CHECKING:
from .configuration_lxmert import LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, LxmertConfig
from .tokenization_lxmert import LxmertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_lxmert_fast import LxmertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lxmert import (
LxmertEncoder,
LxmertForPreTraining,
LxmertForQuestionAnswering,
LxmertModel,
LxmertPreTrainedModel,
LxmertVisualFeatureEncoder,
LxmertXLayer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_lxmert import (
TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLxmertForPreTraining,
TFLxmertMainLayer,
TFLxmertModel,
TFLxmertPreTrainedModel,
TFLxmertVisualFeatureEncoder,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 276 |
import argparse
import math
import traceback
import dateutil.parser as date_parser
import requests
def extract_time_from_single_job(job):
    """Extract start/end time and duration (in minutes) from a single job."""
    job_info = {}

    start = job["started_at"]
    end = job["completed_at"]

    start_datetime = date_parser.parse(start)
    end_datetime = date_parser.parse(end)

    duration_in_min = round((end_datetime - start_datetime).total_seconds() / 60.0)

    job_info["started_at"] = start
    job_info["completed_at"] = end
    job_info["duration"] = duration_in_min

    return job_info


def get_job_time(workflow_run_id, token=None):
    """Extract time info for all jobs in a GitHub Actions workflow run."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
    result = requests.get(url, headers=headers).json()
    job_time = {}

    try:
        job_time.update({job["name"]: extract_time_from_single_job(job) for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            job_time.update({job["name"]: extract_time_from_single_job(job) for job in result["jobs"]})

        return job_time
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")

    return {}
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
    args = parser.parse_args()

    job_time = get_job_time(args.workflow_run_id)
    job_time = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True))

    for k, v in job_time.items():
        print(f'{k}: {v["duration"]}')
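    # A quick stand-alone check of the duration computation used above,
    # assuming ISO-8601 timestamps as returned by the GitHub API. The sample
    # job below is illustrative.
    import dateutil.parser as _date_parser

    sample_job = {"started_at": "2023-01-01T00:00:00Z", "completed_at": "2023-01-01T00:12:30Z"}
    sample_start = _date_parser.parse(sample_job["started_at"])
    sample_end = _date_parser.parse(sample_job["completed_at"])
    print(round((sample_end - sample_start).total_seconds() / 60.0))  # -> 12 (rounded minutes)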
| 276 | 1 |
import io
import math
from typing import Dict, Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import convert_to_rgb, normalize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
get_image_size,
infer_channel_dimension_format,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_vision_available, logging
from ...utils.import_utils import requires_backends
if is_vision_available():
import textwrap
from PIL import Image, ImageDraw, ImageFont
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
    is_torch_greater_or_equal_than_1_11 = False

logger = logging.get_logger(__name__)
DEFAULT_FONT_PATH = "ybelkada/fonts"
def _check_torch_version():
    """simple docstring"""
if is_torch_available() and not is_torch_greater_or_equal_than_1_11:
raise ImportError(
F'You are using torch=={torch.__version__}, but torch>=1.11.0 is required to use '
"Pix2StructImageProcessor. Please upgrade torch." )
def torch_extract_patches(image_tensor, patch_height, patch_width):
    """Extract non-overlapping patches from an image tensor."""
    requires_backends(torch_extract_patches, ["torch"])
    _check_torch_version()

    image_tensor = image_tensor.unsqueeze(0)
    patches = torch.nn.functional.unfold(image_tensor, (patch_height, patch_width), stride=(patch_height, patch_width))
    patches = patches.reshape(image_tensor.size(0), image_tensor.size(1), patch_height, patch_width, -1)
    patches = patches.permute(0, 4, 2, 3, 1).reshape(
        image_tensor.size(2) // patch_height,
        image_tensor.size(3) // patch_width,
        image_tensor.size(1) * patch_height * patch_width,
    )
    return patches.unsqueeze(0)
def render_text(
    text: str,
    text_size: int = 36,
    text_color: str = "black",
    background_color: str = "white",
    left_padding: int = 5,
    right_padding: int = 5,
    top_padding: int = 5,
    bottom_padding: int = 5,
    font_bytes: Optional[bytes] = None,
    font_path: Optional[str] = None,
) -> Image.Image:
    """Render text onto a white background image."""
    requires_backends(render_text, "vision")
    # Add new lines so that each line is no more than 80 characters.
    wrapper = textwrap.TextWrapper(width=80)
    lines = wrapper.wrap(text=text)
    wrapped_text = "\n".join(lines)

    if font_bytes is not None and font_path is None:
        font = io.BytesIO(font_bytes)
    elif font_path is not None:
        font = font_path
    else:
        font = hf_hub_download(DEFAULT_FONT_PATH, "Arial.TTF")
    font = ImageFont.truetype(font, encoding="UTF-8", size=text_size)

    # Use a temporary canvas to determine the width and height in pixels when
    # rendering the text.
    temp_draw = ImageDraw.Draw(Image.new("RGB", (1, 1), background_color))
    _, _, text_width, text_height = temp_draw.textbbox((0, 0), wrapped_text, font)

    # Create the actual image with a bit of padding around the text.
    image_width = text_width + left_padding + right_padding
    image_height = text_height + top_padding + bottom_padding
    image = Image.new("RGB", (image_width, image_height), background_color)
    draw = ImageDraw.Draw(image)
    draw.text(xy=(left_padding, top_padding), text=wrapped_text, fill=text_color, font=font)
    return image
def render_header(image: np.ndarray, header: str, **kwargs) -> np.ndarray:
    """Render a header text on top of an image."""
    requires_backends(render_header, "vision")

    # Convert to PIL image if necessary
    image = to_pil_image(image)

    header_image = render_text(header, **kwargs)
    new_width = max(header_image.width, image.width)

    new_height = int(image.height * (new_width / image.width))
    new_header_height = int(header_image.height * (new_width / header_image.width))

    new_image = Image.new("RGB", (new_width, new_height + new_header_height), "white")
    new_image.paste(header_image.resize((new_width, new_header_height)), (0, 0))
    new_image.paste(image.resize((new_width, new_height)), (0, new_header_height))

    # Convert back to the original framework if necessary
    new_image = to_numpy_array(new_image)
    if infer_channel_dimension_format(new_image) == ChannelDimension.LAST:
        new_image = to_channel_dimension_format(new_image, ChannelDimension.LAST)
    return new_image
class Pix2StructImageProcessor(BaseImageProcessor):
    """simple docstring"""

    model_input_names = ["flattened_patches"]

    def __init__(
        self,
        do_convert_rgb: bool = True,
        do_normalize: bool = True,
        patch_size: Optional[Dict[str, int]] = None,
        max_patches: int = 2048,
        is_vqa: bool = False,
        **kwargs,
    ) -> None:
        """simple docstring"""
        super().__init__(**kwargs)
        self.patch_size = patch_size if patch_size is not None else {"height": 16, "width": 16}
        self.do_normalize = do_normalize
        self.do_convert_rgb = do_convert_rgb
        self.max_patches = max_patches
        self.is_vqa = is_vqa
    def extract_flattened_patches(self, image, max_patches, patch_size, **kwargs) -> np.ndarray:
        """simple docstring"""
        requires_backends(self.extract_flattened_patches, "torch")
        _check_torch_version()

        # convert to torch
        image = to_channel_dimension_format(image, ChannelDimension.FIRST)
        image = torch.from_numpy(image)

        patch_height, patch_width = patch_size["height"], patch_size["width"]
        image_height, image_width = get_image_size(image)

        # maximize scale s.t.
        scale = math.sqrt(max_patches * (patch_height / image_height) * (patch_width / image_width))
        num_feasible_rows = max(min(math.floor(scale * image_height / patch_height), max_patches), 1)
        num_feasible_cols = max(min(math.floor(scale * image_width / patch_width), max_patches), 1)
        resized_height = max(num_feasible_rows * patch_height, 1)
        resized_width = max(num_feasible_cols * patch_width, 1)

        image = torch.nn.functional.interpolate(
            image.unsqueeze(0),
            size=(resized_height, resized_width),
            mode="bilinear",
            align_corners=False,
            antialias=True,
        ).squeeze(0)

        # [1, rows, columns, patch_height * patch_width * image_channels]
        patches = torch_extract_patches(image, patch_height, patch_width)

        patches_shape = patches.shape
        rows = patches_shape[1]
        columns = patches_shape[2]
        depth = patches_shape[3]

        # [rows * columns, patch_height * patch_width * image_channels]
        patches = patches.reshape([rows * columns, depth])

        # [rows * columns, 1]
        row_ids = torch.arange(rows).reshape([rows, 1]).repeat(1, columns).reshape([rows * columns, 1])
        col_ids = torch.arange(columns).reshape([1, columns]).repeat(rows, 1).reshape([rows * columns, 1])

        # Offset by 1 so the ids do not contain zeros, which represent padding.
        row_ids += 1
        col_ids += 1

        # Prepare additional patch features.
        # [rows * columns, 1]
        row_ids = row_ids.to(torch.float32)
        col_ids = col_ids.to(torch.float32)

        # [rows * columns, 2 + patch_height * patch_width * image_channels]
        result = torch.cat([row_ids, col_ids, patches], -1)

        # [max_patches, 2 + patch_height * patch_width * image_channels]
        result = torch.nn.functional.pad(result, [0, 0, 0, max_patches - (rows * columns)]).float()

        result = to_numpy_array(result)
        return result
    def normalize(self, image, data_format=None, **kwargs) -> np.ndarray:
        """simple docstring"""
        if image.dtype == np.uint8:
            image = image.astype(np.float32)

        # take mean across the whole `image`
        mean = np.mean(image)
        std = np.std(image)
        adjusted_stddev = max(std, 1.0 / math.sqrt(np.prod(image.shape)))

        return normalize(image, mean=mean, std=adjusted_stddev, **kwargs)
    def preprocess(
        self,
        images: ImageInput,
        header_text: Optional[str] = None,
        do_convert_rgb: Optional[bool] = None,
        do_normalize: Optional[bool] = None,
        max_patches: Optional[int] = None,
        patch_size: Optional[Dict[str, int]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> ImageInput:
        """simple docstring"""
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        patch_size = patch_size if patch_size is not None else self.patch_size
        max_patches = max_patches if max_patches is not None else self.max_patches
        is_vqa = self.is_vqa

        if kwargs.get("data_format", None) is not None:
            raise ValueError("data_format is not an accepted input as the outputs are ")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if is_vqa:
            if header_text is None:
                raise ValueError("A header text must be provided for VQA models.")
            font_bytes = kwargs.pop("font_bytes", None)
            font_path = kwargs.pop("font_path", None)
            if isinstance(header_text, str):
                header_text = [header_text] * len(images)
            images = [
                render_header(image, header_text[i], font_bytes=font_bytes, font_path=font_path)
                for i, image in enumerate(images)
            ]

        if do_normalize:
            images = [self.normalize(image=image) for image in images]

        # convert to torch tensor and permute
        images = [
            self.extract_flattened_patches(image=image, max_patches=max_patches, patch_size=patch_size)
            for image in images
        ]

        # create attention mask in numpy
        attention_masks = [(image.sum(axis=-1) != 0).astype(np.float32) for image in images]

        encoded_outputs = BatchFeature(
            data={"flattened_patches": images, "attention_mask": attention_masks}, tensor_type=return_tensors
        )
        return encoded_outputs
| 68 |
'''simple docstring'''
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class DDPMSchedulerTest(SchedulerCommonTest):
    """simple docstring"""

    scheduler_classes = (DDPMScheduler,)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "variance_type": "fixed_small",
            "clip_sample": True,
        }
        config.update(**kwargs)
        return config
    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_variance_type(self):
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [0, 500, 999]:
            self.check_over_forward(time_step=t)

    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1e-5
    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            #     sample = pred_prev_sample + variance
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.9606) < 1e-2
        assert abs(result_mean.item() - 0.3372) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            #     sample = pred_prev_sample + variance
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 202.0296) < 1e-2
        assert abs(result_mean.item() - 0.2631) < 1e-3
    def test_custom_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]
        scheduler.set_timesteps(timesteps=timesteps)

        scheduler_timesteps = scheduler.timesteps

        for i, timestep in enumerate(scheduler_timesteps):
            if i == len(scheduler_timesteps) - 1:
                expected_prev_t = -1
            else:
                expected_prev_t = timesteps[i + 1]

            prev_t = scheduler.previous_timestep(timestep)
            prev_t = prev_t.item()

            self.assertEqual(prev_t, expected_prev_t)

    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 51, 0]

        with self.assertRaises(ValueError, msg="`custom_timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)

    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]
        num_inference_steps = len(timesteps)

        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `custom_timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)

    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [scheduler.config.num_train_timesteps]

        with self.assertRaises(
            ValueError,
            msg="`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}",
        ):
            scheduler.set_timesteps(timesteps=timesteps)
| 331 | 0 |
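# A numpy sketch of the "fixed_small" posterior variance probed by
# test_variance above: var(t) = beta_t * (1 - alphabar_{t-1}) / (1 - alphabar_t),
# with a linear beta schedule. Constants mirror the test config; this is an
# illustration, not diffusers' implementation.
import numpy as np

betas = np.linspace(0.0001, 0.02, 1000)
alphas_cumprod = np.cumprod(1.0 - betas)


def fixed_small_variance(t: int) -> float:
    prev = alphas_cumprod[t - 1] if t > 0 else 1.0
    return betas[t] * (1.0 - prev) / (1.0 - alphas_cumprod[t])


print(fixed_small_variance(0), fixed_small_variance(487), fixed_small_variance(999))
# -> ~0.0, ~0.00979, ~0.02, matching the asserted values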
"""simple docstring"""
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = '\n    Examples:\n    ```py\n    >>> from PIL import Image\n    >>> import torch\n    >>> from diffusers import DiffusionPipeline\n    >>> from diffusers.utils import export_to_gif, load_image\n\n    >>> device = torch.device("cuda" if torch.cuda.is_available() else "cpu")\n\n    >>> repo = "openai/shap-e-img2img"\n    >>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)\n    >>> pipe = pipe.to(device)\n\n    >>> guidance_scale = 3.0\n    >>> image_url = "https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png"\n    >>> image = load_image(image_url).convert("RGB")\n\n    >>> images = pipe(\n    ...     image,\n    ...     guidance_scale=guidance_scale,\n    ...     num_inference_steps=64,\n    ...     frame_size=256,\n    ... ).images\n\n    >>> gif_path = export_to_gif(images[0], "corgi_3d.gif")\n    ```\n'


@dataclass
class ShapEPipelineOutput(BaseOutput):
    images: Union[List[List[PIL.Image.Image]], List[List[np.ndarray]]]


class ShapEImg2ImgPipeline(DiffusionPipeline):
    def __init__(
        self,
        prior: PriorTransformer,
        image_encoder: CLIPVisionModel,
        image_processor: CLIPImageProcessor,
        scheduler: HeunDiscreteScheduler,
        renderer: ShapERenderer,
    ):
        super().__init__()
        self.register_modules(
            prior=prior,
            image_encoder=image_encoder,
            image_processor=image_processor,
            scheduler=scheduler,
            renderer=renderer,
        )
    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)

        latents = latents * scheduler.init_noise_sigma
        return latents
    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")

        models = [self.image_encoder, self.prior]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)
    @property
    def _execution_device(self):
        if self.device != torch.device("meta") or not hasattr(self.image_encoder, "_hf_hook"):
            return self.device
        for module in self.image_encoder.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
    def _encode_image(self, image, device, num_images_per_prompt, do_classifier_free_guidance):
        if isinstance(image, list) and isinstance(image[0], torch.Tensor):
            image = torch.cat(image, axis=0) if image[0].ndim == 4 else torch.stack(image, axis=0)

        if not isinstance(image, torch.Tensor):
            image = self.image_processor(image, return_tensors="pt").pixel_values[0].unsqueeze(0)

        image = image.to(dtype=self.image_encoder.dtype, device=device)

        image_embeds = self.image_encoder(image)["last_hidden_state"]
        image_embeds = image_embeds[:, 1:, :].contiguous()  # batch_size, dim, 256
        image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)

        if do_classifier_free_guidance:
            negative_image_embeds = torch.zeros_like(image_embeds)
            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            image_embeds = torch.cat([negative_image_embeds, image_embeds])

        return image_embeds
    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image: Union[PIL.Image.Image, List[PIL.Image.Image]],
        num_images_per_prompt: int = 1,
        num_inference_steps: int = 25,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        guidance_scale: float = 4.0,
        frame_size: int = 64,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        if isinstance(image, PIL.Image.Image):
            batch_size = 1
        elif isinstance(image, torch.Tensor):
            batch_size = image.shape[0]
        elif isinstance(image, list) and isinstance(image[0], (torch.Tensor, PIL.Image.Image)):
            batch_size = len(image)
        else:
            raise ValueError(
                f"`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(image)}"
            )

        device = self._execution_device

        batch_size = batch_size * num_images_per_prompt

        do_classifier_free_guidance = guidance_scale > 1.0
        image_embeds = self._encode_image(image, device, num_images_per_prompt, do_classifier_free_guidance)

        # prior
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps = self.scheduler.timesteps

        num_embeddings = self.prior.config.num_embeddings
        embedding_dim = self.prior.config.embedding_dim

        latents = self.prepare_latents(
            (batch_size, num_embeddings * embedding_dim),
            image_embeds.dtype,
            device,
            generator,
            latents,
            self.scheduler,
        )

        # YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim
        latents = latents.reshape(latents.shape[0], num_embeddings, embedding_dim)

        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            scaled_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            noise_pred = self.prior(
                scaled_model_input,
                timestep=t,
                proj_embedding=image_embeds,
            ).predicted_image_embedding

            # remove the variance
            noise_pred, _ = noise_pred.split(
                scaled_model_input.shape[2], dim=2
            )  # batch_size, num_embeddings, embedding_dim

            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)

            latents = self.scheduler.step(
                noise_pred,
                timestep=t,
                sample=latents,
            ).prev_sample

        if output_type == "latent":
            return ShapEPipelineOutput(images=latents)

        images = []
        for i, latent in enumerate(latents):
            images_projected = self.renderer.decode(
                latent[None, :],
                device,
                size=frame_size,
                ray_batch_size=4096,
                n_coarse_samples=64,
                n_fine_samples=128,
            )
            images.append(images_projected)

        images = torch.stack(images)

        if output_type not in ["np", "pil"]:
            raise ValueError(f"Only the output types `pil` and `np` are supported not output_type={output_type}")

        images = images.cpu().numpy()

        if output_type == "pil":
            images = [self.numpy_to_pil(image) for image in images]

        # Offload last model to CPU
        if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
            self.final_offload_hook.offload()

        if not return_dict:
            return (images,)

        return ShapEPipelineOutput(images=images)
| 364 |
"""simple docstring"""
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
'tokenizer_file': {
'bigscience/tokenizer': 'https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json',
'bigscience/bloom-560m': 'https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json',
'bigscience/bloom-1b1': 'https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json',
'bigscience/bloom-1b7': 'https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json',
'bigscience/bloom-3b': 'https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json',
'bigscience/bloom-7b1': 'https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json',
'bigscience/bloom': 'https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json',
},
}
class BloomTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = None

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<unk>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        add_prefix_space=False,
        clean_up_tokenization_spaces=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            add_prefix_space=add_prefix_space,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
            **kwargs,
        )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"
                " pretokenized inputs."
            )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"
                " pretokenized inputs."
            )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
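# A stand-alone sketch of the conversation flow handled above: each turn is
# encoded, an eos id is appended, and only the last `model_max_length` ids are
# kept. The toy encoder below is an illustrative stand-in for the tokenizer.
def build_conversation_ids(turns, encode, eos_token_id, model_max_length):
    input_ids = []
    for text in turns:
        input_ids.extend(encode(text) + [eos_token_id])
    return input_ids[-model_max_length:]


def toy_encode(s):
    return [ord(c) % 50 for c in s]  # placeholder "tokenizer"


print(build_conversation_ids(["hi", "there"], toy_encode, 0, 8))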
| 53 | 0 |
import warnings
from ...utils import logging
from .image_processing_flava import FlavaImageProcessor
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
class A__ ( UpperCAmelCase__ ):
def __init__( self : Optional[Any] , *_UpperCAmelCase : Optional[int] , **_UpperCAmelCase : str ) -> None:
"""simple docstring"""
warnings.warn(
'The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
' use FlavaImageProcessor instead.' , lowerCamelCase__ , )
super().__init__(*lowerCamelCase__ , **lowerCamelCase__ )
| 325 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bert import BertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt',
'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt',
'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/vocab.txt',
'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/vocab.txt',
'bert-base-multilingual-uncased': (
'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt'
),
'bert-base-multilingual-cased': 'https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt',
'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt',
'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt',
'bert-large-uncased-whole-word-masking': (
'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt'
),
'bert-large-cased-whole-word-masking': (
'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt'
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'
),
'bert-base-cased-finetuned-mrpc': (
'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt'
),
'bert-base-german-dbmdz-cased': 'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt',
'bert-base-german-dbmdz-uncased': (
'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt'
),
'TurkuNLP/bert-base-finnish-cased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt'
),
'TurkuNLP/bert-base-finnish-uncased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt'
),
'wietsedv/bert-base-dutch-cased': (
'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json',
'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json',
'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json',
'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json',
'bert-base-multilingual-uncased': (
'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json'
),
'bert-base-multilingual-cased': (
'https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json'
),
'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json',
'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json',
'bert-large-uncased-whole-word-masking': (
'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json'
),
'bert-large-cased-whole-word-masking': (
'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json'
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'
),
'bert-base-cased-finetuned-mrpc': (
'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json'
),
'bert-base-german-dbmdz-cased': (
'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json'
),
'bert-base-german-dbmdz-uncased': (
'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json'
),
'TurkuNLP/bert-base-finnish-cased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json'
),
'TurkuNLP/bert-base-finnish-uncased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json'
),
'wietsedv/bert-base-dutch-cased': (
'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'bert-base-uncased': 512,
'bert-large-uncased': 512,
'bert-base-cased': 512,
'bert-large-cased': 512,
'bert-base-multilingual-uncased': 512,
'bert-base-multilingual-cased': 512,
'bert-base-chinese': 512,
'bert-base-german-cased': 512,
'bert-large-uncased-whole-word-masking': 512,
'bert-large-cased-whole-word-masking': 512,
'bert-large-uncased-whole-word-masking-finetuned-squad': 512,
'bert-large-cased-whole-word-masking-finetuned-squad': 512,
'bert-base-cased-finetuned-mrpc': 512,
'bert-base-german-dbmdz-cased': 512,
'bert-base-german-dbmdz-uncased': 512,
'TurkuNLP/bert-base-finnish-cased-v1': 512,
'TurkuNLP/bert-base-finnish-uncased-v1': 512,
'wietsedv/bert-base-dutch-cased': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'bert-base-uncased': {'do_lower_case': True},
'bert-large-uncased': {'do_lower_case': True},
'bert-base-cased': {'do_lower_case': False},
'bert-large-cased': {'do_lower_case': False},
'bert-base-multilingual-uncased': {'do_lower_case': True},
'bert-base-multilingual-cased': {'do_lower_case': False},
'bert-base-chinese': {'do_lower_case': False},
'bert-base-german-cased': {'do_lower_case': False},
'bert-large-uncased-whole-word-masking': {'do_lower_case': True},
'bert-large-cased-whole-word-masking': {'do_lower_case': False},
'bert-large-uncased-whole-word-masking-finetuned-squad': {'do_lower_case': True},
'bert-large-cased-whole-word-masking-finetuned-squad': {'do_lower_case': False},
'bert-base-cased-finetuned-mrpc': {'do_lower_case': False},
'bert-base-german-dbmdz-cased': {'do_lower_case': False},
'bert-base-german-dbmdz-uncased': {'do_lower_case': True},
'TurkuNLP/bert-base-finnish-cased-v1': {'do_lower_case': False},
'TurkuNLP/bert-base-finnish-uncased-v1': {'do_lower_case': True},
'wietsedv/bert-base-dutch-cased': {'do_lower_case': False},
}
class BertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        """simple docstring"""
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """simple docstring"""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """simple docstring"""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
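# A stand-alone sketch of the segment-id layout built by
# create_token_type_ids_from_sequences above: segment A (plus [CLS]/[SEP])
# maps to 0s, the optional segment B (plus its [SEP]) maps to 1s. The toy ids
# below are illustrative.
def toy_token_type_ids(ids_a, ids_b=None, cls_id=101, sep_id=102):
    first = [cls_id] + ids_a + [sep_id]
    if ids_b is None:
        return [0] * len(first)
    return [0] * len(first) + [1] * len(ids_b + [sep_id])


print(toy_token_type_ids([7, 8], [9]))  # -> [0, 0, 0, 0, 1, 1]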
| 234 | 0 |
import unittest
from diffusers import FlaxAutoencoderKL
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax
from .test_modeling_common_flax import FlaxModelTesterMixin
if is_flax_available():
import jax
@require_flax
class FlaxAutoencoderKLTests(FlaxModelTesterMixin, unittest.TestCase):
    """simple docstring"""

    model_class = FlaxAutoencoderKL

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        prng_key = jax.random.PRNGKey(0)
        image = jax.random.uniform(prng_key, ((batch_size, num_channels) + sizes))

        return {"sample": image, "prng_key": prng_key}

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
| 359 |
from collections import Counter
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
data = datasets.load_iris()

X = np.array(data["data"])
y = np.array(data["target"])
classes = data["target_names"]

X_train, X_test, y_train, y_test = train_test_split(X, y)


def euclidean_distance(a, b):
    """Gives the Euclidean distance between two points."""
    return np.linalg.norm(np.array(a) - np.array(b))


def classifier(train_data, train_target, classes, point, k=5):
    """Classifies the point using the KNN algorithm."""
    data = zip(train_data, train_target)
    # List of distances of all points from the point to be classified
    distances = []
    for data_point in data:
        distance = euclidean_distance(data_point[0], point)
        distances.append((distance, data_point[1]))
    # Choosing 'k' points with the least distances.
    votes = [i[1] for i in sorted(distances)[:k]]
    # Most commonly occurring class among them
    # is the class into which the point is classified
    result = Counter(votes).most_common(1)[0][0]
    return classes[result]
if __name__ == "__main__":
print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
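    # The majority vote at the heart of the classifier, in isolation: Counter
    # picks the most frequent class index among the k nearest labels. The
    # votes below are illustrative.
    votes = [0, 1, 1, 2, 1]
    print(Counter(votes).most_common(1)[0][0])  # -> 1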
| 134 | 0 |
def jaro_winkler(str1: str, str2: str) -> float:
    """simple docstring"""

    def get_matched_characters(_str1: str, _str2: str) -> str:
        matched = []
        limit = min(len(_str1), len(_str2)) // 2
        for i, l in enumerate(_str1):
            left = int(max(0, i - limit))
            right = int(min(i + limit + 1, len(_str2)))
            if l in _str2[left:right]:
                matched.append(l)
                _str2 = f"{_str2[0:_str2.index(l)]} {_str2[_str2.index(l) + 1:]}"

        return "".join(matched)

    # matching characters
    matching_1 = get_matched_characters(str1, str2)
    matching_2 = get_matched_characters(str2, str1)
    match_count = len(matching_1)

    # transposition
    transpositions = (
        len([(c1, c2) for c1, c2 in zip(matching_1, matching_2) if c1 != c2]) // 2
    )

    if not match_count:
        jaro = 0.0
    else:
        jaro = (
            1
            / 3
            * (
                match_count / len(str1)
                + match_count / len(str2)
                + (match_count - transpositions) / match_count
            )
        )

    # common prefix up to 4 characters
    prefix_len = 0
    for c1, c2 in zip(str1[:4], str2[:4]):
        if c1 == c2:
            prefix_len += 1
        else:
            break

    return jaro + 0.1 * prefix_len * (1 - jaro)
if __name__ == "__main__":
import doctest
doctest.testmod()
print(jaro_winkler("hello", "world"))
| 51 |
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {"configuration_gpt_neox": ["GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoXConfig"]}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_gpt_neox_fast"] = ["GPTNeoXTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_neox"] = [
"GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST",
"GPTNeoXForCausalLM",
"GPTNeoXForQuestionAnswering",
"GPTNeoXForSequenceClassification",
"GPTNeoXForTokenClassification",
"GPTNeoXLayer",
"GPTNeoXModel",
"GPTNeoXPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_gpt_neox import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_neox_fast import GPTNeoXTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox import (
GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXLayer,
GPTNeoXModel,
GPTNeoXPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
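# A minimal illustration of the lazy-import pattern these __init__ files rely
# on: attribute access triggers the submodule import on first use. This
# sketches the idea only; it is not transformers' actual _LazyModule
# implementation.
import importlib
import types


class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map "ClassName" -> "submodule" so __getattr__ knows where to look
        self._class_to_module = {
            cls: mod for mod, classes in import_structure.items() for cls in classes
        }

    def __getattr__(self, name):
        # the submodule is imported lazily, only on first attribute access
        module = importlib.import_module("." + self._class_to_module[name], self.__name__)
        return getattr(module, name)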
| 51 | 1 |
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
logger = logging.getLogger(__name__)


@dataclass
@add_start_docstrings(TrainingArguments.__doc__)
class Seq2SeqTrainingArguments(TrainingArguments):
    sortish_sampler: bool = field(default=False, metadata={"help": "Whether to use SortishSampler or not."})
    predict_with_generate: bool = field(
        default=False, metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."}
    )
    generation_max_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `max_length` value of the model configuration."
            )
        },
    )
    generation_num_beams: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `num_beams` value of the model configuration."
            )
        },
    )
    generation_config: Optional[Union[str, Path, GenerationConfig]] = field(
        default=None,
        metadata={
            "help": "Model id, file path or url pointing to a GenerationConfig json file, to use during prediction."
        },
    )

    def to_dict(self):
        """simple docstring"""
        d = super().to_dict()
        for k, v in d.items():
            if isinstance(v, GenerationConfig):
                d[k] = v.to_dict()
        return d
| 352 |
import numpy as np
from cv2 import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filter2D, imread, imshow, waitKey


def gabor_filter_kernel(ksize: int, sigma: int, theta: int, lambd: int, psi: int, gamma: int):
    # prepare kernel
    # the kernel size have to be odd
    if (ksize % 2) == 0:
        ksize = ksize + 1
    gabor = np.zeros((ksize, ksize), dtype=np.float32)

    # each value
    for y in range(ksize):
        for x in range(ksize):
            # distance from center
            px = x - ksize // 2
            py = y - ksize // 2

            # degree to radiant
            _theta = theta / 180 * np.pi
            cos_theta = np.cos(_theta)
            sin_theta = np.sin(_theta)

            # get kernel x
            _x = cos_theta * px + sin_theta * py

            # get kernel y
            _y = -sin_theta * px + cos_theta * py

            # fill kernel
            gabor[y, x] = np.exp(
                -(_x**2 + gamma**2 * _y**2) / (2 * sigma**2)
            ) * np.cos(2 * np.pi * _x / lambd + psi)

    return gabor


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # read original image
    img = imread("../image_data/lena.jpg")
    # turn image in gray scale value
    gray = cvtColor(img, COLOR_BGR2GRAY)

    # Apply multiple Kernel to detect edges
    out = np.zeros(gray.shape[:2])
    for theta in [0, 30, 60, 90, 120, 150]:
        kernel_10 = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
        out += filter2D(gray, CV_8UC3, kernel_10)
    out = out / out.max() * 255
    out = out.astype(np.uint8)

    imshow("Original", gray)
    imshow("Gabor filter with 20x20 mask and 6 directions", out)
    waitKey(0)
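    # A quick sanity check of gabor_filter_kernel (assumes the definition
    # above): an even ksize is bumped to the next odd value, and with psi=0
    # the centre of the kernel attains the maximum value of 1.0.
    k = gabor_filter_kernel(10, 8, 0, 10, 0, 0)
    print(k.shape)  # -> (11, 11): ksize=10 was bumped to 11
    print(k[5, 5] == k.max())  # -> True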
| 10 | 0 |
'''simple docstring'''
import importlib.util
import os
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import (
is_accelerate_available,
is_flax_available,
is_safetensors_available,
is_tf_available,
is_torch_available,
)
from . import BaseTransformersCLICommand
def info_command_factory(_):
    """simple docstring"""
    return EnvironmentCommand()


def download_command_factory(args):
    """simple docstring"""
    return EnvironmentCommand(args.accelerate_config_file)
class EnvironmentCommand(BaseTransformersCLICommand):
    """simple docstring"""

    @staticmethod
    def register_subcommand(parser):
        """simple docstring"""
        download_parser = parser.add_parser("env")
        download_parser.set_defaults(func=info_command_factory)
        download_parser.add_argument(
            "--accelerate-config_file",
            default=None,
            help="The accelerate config file to use for the default values in the launching script.",
        )
        download_parser.set_defaults(func=download_command_factory)
    def __init__(self, accelerate_config_file, *args):
        """simple docstring"""
        self._accelerate_config_file = accelerate_config_file
    def run(self):
        """simple docstring"""
        safetensors_version = "not installed"
        if is_safetensors_available():
            import safetensors

            safetensors_version = safetensors.__version__
        elif importlib.util.find_spec("safetensors") is not None:
            import safetensors

            safetensors_version = f"{safetensors.__version__} but is ignored because of PyTorch version too old."

        accelerate_version = "not installed"
        accelerate_config = accelerate_config_str = "not found"
        if is_accelerate_available():
            import accelerate
            from accelerate.commands.config import default_config_file, load_config_from_file

            accelerate_version = accelerate.__version__
            # Get the default from the config file.
            if self._accelerate_config_file is not None or os.path.isfile(default_config_file):
                accelerate_config = load_config_from_file(self._accelerate_config_file).to_dict()
            accelerate_config_str = (
                "\n".join([f"\t- {prop}: {val}" for prop, val in accelerate_config.items()])
                if isinstance(accelerate_config, dict)
                else f"\t{accelerate_config}"
            )

        pt_version = "not installed"
        pt_cuda_available = "NA"
        if is_torch_available():
            import torch

            pt_version = torch.__version__
            pt_cuda_available = torch.cuda.is_available()

        tf_version = "not installed"
        tf_cuda_available = "NA"
        if is_tf_available():
            import tensorflow as tf

            tf_version = tf.__version__
            try:
                # deprecated in v2.1
                tf_cuda_available = tf.test.is_gpu_available()
            except AttributeError:
                # returns list of devices, convert to bool
                tf_cuda_available = bool(tf.config.list_physical_devices("GPU"))

        flax_version = "not installed"
        jax_version = "not installed"
        jaxlib_version = "not installed"
        jax_backend = "NA"
        if is_flax_available():
            import flax
            import jax
            import jaxlib

            flax_version = flax.__version__
            jax_version = jax.__version__
            jaxlib_version = jaxlib.__version__
            jax_backend = jax.lib.xla_bridge.get_backend().platform

        info = {
            "`transformers` version": version,
            "Platform": platform.platform(),
            "Python version": platform.python_version(),
            "Huggingface_hub version": huggingface_hub.__version__,
            "Safetensors version": f"{safetensors_version}",
            "Accelerate version": f"{accelerate_version}",
            "Accelerate config": f"{accelerate_config_str}",
            "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})",
            "Tensorflow version (GPU?)": f"{tf_version} ({tf_cuda_available})",
            "Flax version (CPU?/GPU?/TPU?)": f"{flax_version} ({jax_backend})",
            "Jax version": f"{jax_version}",
            "JaxLib version": f"{jaxlib_version}",
            "Using GPU in script?": "<fill in>",
            "Using distributed or parallel set-up in script?": "<fill in>",
        }

        print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n")
        print(self.format_dict(info))

        return info
    @staticmethod
    def format_dict(d):
        """simple docstring"""
        return "\n".join([f"- {prop}: {val}" for prop, val in d.items()]) + "\n"
| 324 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

CONVNEXTV2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/convnextv2-tiny-1k-224": "https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json",
}
class ConvNextV2Config(BackboneConfigMixin, PretrainedConfig):
    model_type = "convnextv2"

    def __init__(
        self,
        num_channels=3,
        patch_size=4,
        num_stages=4,
        hidden_sizes=None,
        depths=None,
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        drop_path_rate=0.0,
        image_size=224,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        """simple docstring"""
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_stages = num_stages
        self.hidden_sizes = [96, 192, 384, 768] if hidden_sizes is None else hidden_sizes
        self.depths = [3, 3, 9, 3] if depths is None else depths
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.drop_path_rate = drop_path_rate
        self.image_size = image_size
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
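# A sketch of the observable behaviour of the alignment helper used above,
# under assumed semantics: explicit names win if given, otherwise indices
# select names, and the deepest stage is the default. The function name here
# is illustrative, not transformers' implementation.
stage_names = ["stem"] + [f"stage{i}" for i in range(1, 5)]


def align(out_features, out_indices, stage_names):
    if out_features is None and out_indices is None:
        out_features = [stage_names[-1]]  # default: deepest stage only
    if out_features is None:
        out_features = [stage_names[i] for i in out_indices]
    if out_indices is None:
        out_indices = [stage_names.index(n) for n in out_features]
    return out_features, out_indices


print(align(None, [1, 3], stage_names))  # -> (['stage1', 'stage3'], [1, 3])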
| 52 | 0 |
'''simple docstring'''
# Logistic Regression from scratch
# In[62]:
# In[63]:
# importing all the required libraries
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets
def sigmoid_function(z):
    return 1 / (1 + np.exp(-z))


def cost_function(h, y):
    return (-y * np.log(h) - (1 - y) * np.log(1 - h)).mean()


def log_likelihood(x, y, weights):
    scores = np.dot(x, weights)
    return np.sum(y * scores - np.log(1 + np.exp(scores)))


def logistic_reg(alpha, x, y, max_iterations=70000):
    theta = np.zeros(x.shape[1])

    for iterations in range(max_iterations):
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        gradient = np.dot(x.T, h - y) / y.size
        theta = theta - alpha * gradient  # updating the weights
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        j = cost_function(h, y)
        if iterations % 100 == 0:
            print(f"loss: {j} \t")  # printing the loss after every 100 iterations
    return theta
# In[68]:
if __name__ == "__main__":
    iris = datasets.load_iris()
    x = iris.data[:, :2]
    y = (iris.target != 0) * 1
    alpha = 0.1
    theta = logistic_reg(alpha, x, y, max_iterations=70_000)
print('theta: ', theta) # printing the theta i.e our weights vector
    def predict_prob(x):
        return sigmoid_function(
            np.dot(x, theta)
        )  # predicting the value of probability from the logistic regression algorithm
plt.figure(figsize=(10, 6))
plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color='b', label='0')
plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color='r', label='1')
((__snake_case) , (__snake_case)) : Union[str, Any] = (x[:, 0].min(), x[:, 0].max())
((__snake_case) , (__snake_case)) : Dict = (x[:, 1].min(), x[:, 1].max())
((__snake_case) , (__snake_case)) : List[Any] = np.meshgrid(np.linspace(xa_min, xa_max), np.linspace(xa_min, xa_max))
__snake_case : int = np.c_[xxa.ravel(), xxa.ravel()]
__snake_case : Any = predict_prob(grid).reshape(xxa.shape)
plt.contour(xxa, xxa, probs, [0.5], linewidths=1, colors='black')
plt.legend()
plt.show()
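# Note: the gradient used in `logistic_reg` is the derivative of the cross-entropy cost,
# dJ/dtheta = x.T @ (h - y) / m, which is why a single matrix product per iteration is
# enough to update all the weights at once.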
| 18 | '''simple docstring'''
import argparse
from pathlib import Path

import torch

from transformers import OPTConfig, OPTModel
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def load_checkpoint(checkpoint_path):
    sd = torch.load(checkpoint_path, map_location="cpu")
    if "model" in sd.keys():
        sd = torch.load(checkpoint_path, map_location="cpu")["model"]

    # pop unnecessary weights
    keys_to_delete = [
        "decoder.version",
        "decoder.output_projection.weight",
    ]
    for key in keys_to_delete:
        if key in sd:
            sd.pop(key)

    keys_to_rename = {
        "decoder.project_in_dim.weight": "decoder.project_in.weight",
        "decoder.project_out_dim.weight": "decoder.project_out.weight",
        "decoder.layer_norm.weight": "decoder.final_layer_norm.weight",
        "decoder.layer_norm.bias": "decoder.final_layer_norm.bias",
    }
    for old_key, new_key in keys_to_rename.items():
        if old_key in sd:
            sd[new_key] = sd.pop(old_key)

    keys = list(sd.keys())
    for key in keys:
        if ".qkv_proj." in key:
            value = sd[key]
            # We split QKV in separate Q,K,V
            q_name = key.replace(".qkv_proj.", ".q_proj.")
            k_name = key.replace(".qkv_proj.", ".k_proj.")
            v_name = key.replace(".qkv_proj.", ".v_proj.")

            depth = value.shape[0]
            assert depth % 3 == 0
            # `SequeuceParallelTransformerBlock` has its QKV weight separated in K,V,Q order despite the naming:
            # https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
            k, v, q = torch.split(value, depth // 3, dim=0)

            sd[q_name] = q
            sd[k_name] = k
            sd[v_name] = v
            del sd[key]

    return sd


@torch.no_grad()
def convert_opt_checkpoint(checkpoint_path, pytorch_dump_folder_path, config=None):
    state_dict = load_checkpoint(checkpoint_path)

    if config is not None:
        config = OPTConfig.from_pretrained(config)
    else:
        config = OPTConfig()

    model = OPTModel(config).half().eval()
    model.load_state_dict(state_dict)

    # Check results
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--fairseq_path",
        type=str,
        help=(
            "path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:"
            " https://huggingface.co/models?other=opt_metasq"
        ),
    )
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--hf_config", default=None, type=str, help="Define HF config.")
    args = parser.parse_args()
    convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
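# Illustrative invocation (the script name and paths below are placeholders, not from the source):
#   python convert_opt_checkpoint.py --fairseq_path /path/to/restored.pt \
#       --pytorch_dump_folder_path ./opt-hf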
| 18 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_clap''': [
'''CLAP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ClapAudioConfig''',
'''ClapConfig''',
'''ClapTextConfig''',
],
'''processing_clap''': ['''ClapProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clap"] = [
'''CLAP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ClapModel''',
'''ClapPreTrainedModel''',
'''ClapTextModel''',
'''ClapTextModelWithProjection''',
'''ClapAudioModel''',
'''ClapAudioModelWithProjection''',
]
    _import_structure["feature_extraction_clap"] = ["ClapFeatureExtractor"]
if TYPE_CHECKING:
from .configuration_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioConfig,
ClapConfig,
ClapTextConfig,
)
from .processing_clap import ClapProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clap import ClapFeatureExtractor
from .modeling_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioModel,
ClapAudioModelWithProjection,
ClapModel,
ClapPreTrainedModel,
ClapTextModel,
ClapTextModelWithProjection,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
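# Illustrative note (not part of the original file): the `_LazyModule` registered above defers
# the heavy torch-backed imports until an attribute is first touched, e.g.
#   from transformers import ClapProcessor  # cheap: no torch-side module import yet
#   processor = ClapProcessor.from_pretrained("laion/clap-htsat-unfused")  # checkpoint name is an assumption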
| 43 |
import unittest
from parameterized import parameterized
from transformers import AutoTokenizer, GPTNeoXConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXModel,
)
class A :
"""simple docstring"""
def __init__( self : str,lowercase_ : Any,lowercase_ : Tuple=1_3,lowercase_ : str=7,lowercase_ : Tuple=True,lowercase_ : int=True,lowercase_ : List[Any]=True,lowercase_ : List[str]=True,lowercase_ : List[str]=9_9,lowercase_ : List[Any]=6_4,lowercase_ : List[str]=5,lowercase_ : Optional[Any]=4,lowercase_ : Optional[Any]=3_7,lowercase_ : Optional[Any]="gelu",lowercase_ : int=0.1,lowercase_ : str=0.1,lowercase_ : Optional[Any]=5_1_2,lowercase_ : int=1_6,lowercase_ : List[Any]=2,lowercase_ : Union[str, Any]=0.02,lowercase_ : Tuple=3,lowercase_ : List[Any]=4,lowercase_ : str=None,)-> Union[str, Any]:
'''simple docstring'''
A__ = parent
A__ = batch_size
A__ = seq_length
A__ = is_training
A__ = use_input_mask
A__ = use_token_type_ids
A__ = use_labels
A__ = vocab_size
A__ = hidden_size
A__ = num_hidden_layers
A__ = num_attention_heads
A__ = intermediate_size
A__ = hidden_act
A__ = hidden_dropout_prob
A__ = attention_probs_dropout_prob
A__ = max_position_embeddings
A__ = type_vocab_size
A__ = type_sequence_label_size
A__ = initializer_range
A__ = num_labels
A__ = num_choices
A__ = scope
A__ = vocab_size - 1
def snake_case__ ( self : str )-> Optional[Any]:
'''simple docstring'''
A__ = ids_tensor([self.batch_size, self.seq_length],self.vocab_size )
A__ = None
if self.use_input_mask:
A__ = random_attention_mask([self.batch_size, self.seq_length] )
A__ = None
if self.use_labels:
A__ = ids_tensor([self.batch_size, self.seq_length],self.num_labels )
A__ = self.get_config()
return config, input_ids, input_mask, token_labels
def snake_case__ ( self : List[Any] )-> Tuple:
'''simple docstring'''
return GPTNeoXConfig(
vocab_size=self.vocab_size,hidden_size=self.hidden_size,num_hidden_layers=self.num_hidden_layers,num_attention_heads=self.num_attention_heads,intermediate_size=self.intermediate_size,hidden_act=self.hidden_act,hidden_dropout_prob=self.hidden_dropout_prob,attention_probs_dropout_prob=self.attention_probs_dropout_prob,max_position_embeddings=self.max_position_embeddings,type_vocab_size=self.type_vocab_size,is_decoder=lowercase_,initializer_range=self.initializer_range,pad_token_id=self.pad_token_id,)
def snake_case__ ( self : Optional[int] )-> Union[str, Any]:
'''simple docstring'''
A__ , A__ , A__ , A__ = self.prepare_config_and_inputs()
A__ = True
return config, input_ids, input_mask, token_labels
def snake_case__ ( self : Any,lowercase_ : List[Any],lowercase_ : List[Any],lowercase_ : str )-> Any:
'''simple docstring'''
A__ = GPTNeoXModel(config=lowercase_ )
model.to(lowercase_ )
model.eval()
A__ = model(lowercase_,attention_mask=lowercase_ )
A__ = model(lowercase_ )
self.parent.assertEqual(result.last_hidden_state.shape,(self.batch_size, self.seq_length, self.hidden_size) )
def snake_case__ ( self : Union[str, Any],lowercase_ : List[str],lowercase_ : Dict,lowercase_ : Optional[Any] )-> Tuple:
'''simple docstring'''
A__ = True
A__ = GPTNeoXModel(lowercase_ )
model.to(lowercase_ )
model.eval()
A__ = model(lowercase_,attention_mask=lowercase_ )
self.parent.assertEqual(result.last_hidden_state.shape,(self.batch_size, self.seq_length, self.hidden_size) )
def snake_case__ ( self : Union[str, Any],lowercase_ : str,lowercase_ : Union[str, Any],lowercase_ : Union[str, Any],lowercase_ : List[str] )-> List[str]:
'''simple docstring'''
A__ = GPTNeoXForCausalLM(config=lowercase_ )
model.to(lowercase_ )
model.eval()
A__ = model(lowercase_,attention_mask=lowercase_,labels=lowercase_ )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.seq_length, self.vocab_size) )
def snake_case__ ( self : Optional[int],lowercase_ : Optional[int],lowercase_ : Optional[int],lowercase_ : Dict,lowercase_ : Any )-> int:
'''simple docstring'''
A__ = self.num_labels
A__ = GPTNeoXForQuestionAnswering(lowercase_ )
model.to(lowercase_ )
model.eval()
A__ = model(lowercase_,attention_mask=lowercase_ )
self.parent.assertEqual(result.start_logits.shape,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape,(self.batch_size, self.seq_length) )
def snake_case__ ( self : List[str],lowercase_ : List[str],lowercase_ : int,lowercase_ : Union[str, Any],lowercase_ : Optional[int] )-> str:
'''simple docstring'''
A__ = self.num_labels
A__ = GPTNeoXForSequenceClassification(lowercase_ )
model.to(lowercase_ )
model.eval()
A__ = ids_tensor([self.batch_size],self.type_sequence_label_size )
A__ = model(lowercase_,attention_mask=lowercase_,labels=lowercase_ )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.num_labels) )
def snake_case__ ( self : Any,lowercase_ : Union[str, Any],lowercase_ : List[Any],lowercase_ : Optional[Any],lowercase_ : int )-> Union[str, Any]:
'''simple docstring'''
A__ = self.num_labels
A__ = GPTNeoXForTokenClassification(lowercase_ )
model.to(lowercase_ )
model.eval()
A__ = model(lowercase_,attention_mask=lowercase_,labels=lowercase_ )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.seq_length, self.num_labels) )
def snake_case__ ( self : int,lowercase_ : str,lowercase_ : int,lowercase_ : Union[str, Any] )-> List[Any]:
'''simple docstring'''
A__ = True
A__ = GPTNeoXForCausalLM(config=lowercase_ )
model.to(lowercase_ )
model.eval()
# first forward pass
A__ = model(lowercase_,attention_mask=lowercase_,use_cache=lowercase_ )
A__ = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
A__ = ids_tensor((self.batch_size, 3),config.vocab_size )
A__ = ids_tensor((self.batch_size, 3),vocab_size=2 )
# append to next input_ids and
A__ = torch.cat([input_ids, next_tokens],dim=-1 )
A__ = torch.cat([input_mask, next_mask],dim=-1 )
A__ = model(lowercase_,attention_mask=lowercase_,output_hidden_states=lowercase_ )
A__ = output_from_no_past['hidden_states'][0]
A__ = model(
lowercase_,attention_mask=lowercase_,past_key_values=lowercase_,output_hidden_states=lowercase_,)['hidden_states'][0]
# select random slice
A__ = ids_tensor((1,),output_from_past.shape[-1] ).item()
A__ = output_from_no_past[:, -3:, random_slice_idx].detach()
A__ = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowercase_,lowercase_,atol=1E-3 ) )
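        # Note: the slice comparison above checks that decoding with a KV cache
        # (past_key_values) reproduces the hidden states of a full no-cache forward pass for
        # the newly appended tokens; caching must be a pure speed optimization.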
def snake_case__ ( self : str )-> Union[str, Any]:
'''simple docstring'''
A__ = self.prepare_config_and_inputs()
A__ , A__ , A__ , A__ = config_and_inputs
A__ = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class A ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase = (
(
GPTNeoXModel,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
)
if is_torch_available()
else ()
)
lowerCamelCase = (GPTNeoXForCausalLM,) if is_torch_available() else ()
lowerCamelCase = (
{
'feature-extraction': GPTNeoXModel,
'question-answering': GPTNeoXForQuestionAnswering,
'text-classification': GPTNeoXForSequenceClassification,
'text-generation': GPTNeoXForCausalLM,
'token-classification': GPTNeoXForTokenClassification,
'zero-shot': GPTNeoXForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCamelCase = False
lowerCamelCase = False
lowerCamelCase = False
lowerCamelCase = False
def snake_case__ ( self : str )-> Tuple:
'''simple docstring'''
A__ = GPTNeoXModelTester(self )
A__ = ConfigTester(self,config_class=lowercase_,hidden_size=6_4,num_attention_heads=8 )
def snake_case__ ( self : Optional[Any] )-> Union[str, Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def snake_case__ ( self : Union[str, Any] )-> Union[str, Any]:
'''simple docstring'''
A__ , A__ , A__ , A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(lowercase_,lowercase_,lowercase_ )
def snake_case__ ( self : Dict )-> List[Any]:
'''simple docstring'''
A__ , A__ , A__ , A__ = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(lowercase_,lowercase_,lowercase_ )
def snake_case__ ( self : List[str] )-> Any:
'''simple docstring'''
A__ , A__ , A__ , A__ = self.model_tester.prepare_config_and_inputs_for_decoder()
A__ = None
self.model_tester.create_and_check_model_as_decoder(lowercase_,lowercase_,lowercase_ )
def snake_case__ ( self : Optional[Any] )-> str:
'''simple docstring'''
A__ , A__ , A__ , A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(lowercase_,lowercase_,lowercase_ )
def snake_case__ ( self : Dict )-> Dict:
'''simple docstring'''
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_causal_lm(*lowercase_ )
def snake_case__ ( self : Tuple )-> List[Any]:
'''simple docstring'''
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*lowercase_ )
def snake_case__ ( self : Any )-> List[str]:
'''simple docstring'''
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*lowercase_ )
def snake_case__ ( self : str )-> Tuple:
'''simple docstring'''
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowercase_ )
@unittest.skip(reason='Feed forward chunking is not implemented' )
def snake_case__ ( self : Union[str, Any] )-> Optional[Any]:
'''simple docstring'''
pass
@parameterized.expand([('linear',), ('dynamic',)] )
def snake_case__ ( self : List[str],lowercase_ : Any )-> List[str]:
'''simple docstring'''
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
A__ = ids_tensor([1, 1_0],config.vocab_size )
A__ = ids_tensor([1, int(config.max_position_embeddings * 1.5 )],config.vocab_size )
set_seed(4_2 ) # Fixed seed at init time so the two models get the same random weights
A__ = GPTNeoXModel(lowercase_ )
original_model.to(lowercase_ )
original_model.eval()
A__ = original_model(lowercase_ ).last_hidden_state
A__ = original_model(lowercase_ ).last_hidden_state
set_seed(4_2 ) # Fixed seed at init time so the two models get the same random weights
A__ = {'type': scaling_type, 'factor': 10.0}
A__ = GPTNeoXModel(lowercase_ )
scaled_model.to(lowercase_ )
scaled_model.eval()
A__ = scaled_model(lowercase_ ).last_hidden_state
A__ = scaled_model(lowercase_ ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(lowercase_,lowercase_,atol=1E-5 ) )
else:
self.assertFalse(torch.allclose(lowercase_,lowercase_,atol=1E-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(lowercase_,lowercase_,atol=1E-5 ) )
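        # Note: "linear" RoPE scaling divides every position index by `factor`, so even short
        # inputs produce different embeddings, while "dynamic" NTK scaling leaves the
        # embeddings untouched until the input exceeds the pretrained context length; that is
        # why the branch above expects a match only in the dynamic case for short inputs.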
@require_torch
class A ( unittest.TestCase ):
"""simple docstring"""
@slow
def snake_case__ ( self : Tuple )-> Union[str, Any]:
'''simple docstring'''
A__ = AutoTokenizer.from_pretrained('EleutherAI/pythia-410m-deduped' )
for checkpointing in [True, False]:
A__ = GPTNeoXForCausalLM.from_pretrained('EleutherAI/pythia-410m-deduped' )
if checkpointing:
model.gradient_checkpointing_enable()
else:
model.gradient_checkpointing_disable()
model.to(lowercase_ )
A__ = tokenizer('My favorite food is',return_tensors='pt' ).to(lowercase_ )
# The hub repo. is updated on 2023-04-04, resulting in poor outputs.
# See: https://github.com/huggingface/transformers/pull/24193
A__ = 'My favorite food is a good old-fashioned, old-fashioned, old-fashioned.\n\nI\'m not sure'
A__ = model.generate(**lowercase_,do_sample=lowercase_,max_new_tokens=2_0 )
A__ = tokenizer.batch_decode(lowercase_ )[0]
self.assertEqual(lowercase_,lowercase_ )
| 7 | 0 |
import os
import zipfile

import pytest

from datasets.utils.extract import (
    Bzip2Extractor,
    Extractor,
    GzipExtractor,
    Lz4Extractor,
    SevenZipExtractor,
    TarExtractor,
    XzExtractor,
    ZipExtractor,
    ZstdExtractor,
)

from .utils import require_lz4, require_py7zr, require_zstandard


@pytest.mark.parametrize(
    "compression_format, is_archive",
    [
        ("7z", True),
        ("bz2", False),
        ("gzip", False),
        ("lz4", False),
        ("tar", True),
        ("xz", False),
        ("zip", True),
        ("zstd", False),
    ],
)
def test_base_extractors(
    compression_format,
    is_archive,
    bz2_file,
    gz_file,
    lz4_file,
    seven_zip_file,
    tar_file,
    xz_file,
    zip_file,
    zstd_file,
    tmp_path,
    text_file,
):
    input_paths_and_base_extractors = {
        "7z": (seven_zip_file, SevenZipExtractor),
        "bz2": (bz2_file, Bzip2Extractor),
        "gzip": (gz_file, GzipExtractor),
        "lz4": (lz4_file, Lz4Extractor),
        "tar": (tar_file, TarExtractor),
        "xz": (xz_file, XzExtractor),
        "zip": (zip_file, ZipExtractor),
        "zstd": (zstd_file, ZstdExtractor),
    }
    input_path, base_extractor = input_paths_and_base_extractors[compression_format]
    if input_path is None:
        reason = f"for '{compression_format}' compression_format, "
        if compression_format == "7z":
            reason += require_py7zr.kwargs["reason"]
        elif compression_format == "lz4":
            reason += require_lz4.kwargs["reason"]
        elif compression_format == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason)
    assert base_extractor.is_extractable(input_path)
    output_path = tmp_path / ("extracted" if is_archive else "extracted.txt")
    base_extractor.extract(input_path, output_path)
    if is_archive:
        assert output_path.is_dir()
        for file_path in output_path.iterdir():
            assert file_path.name == text_file.name
        extracted_file_content = file_path.read_text(encoding="utf-8")
    else:
        extracted_file_content = output_path.read_text(encoding="utf-8")
    expected_file_content = text_file.read_text(encoding="utf-8")
    assert extracted_file_content == expected_file_content


@pytest.mark.parametrize(
    "compression_format, is_archive",
    [
        ("7z", True),
        ("bz2", False),
        ("gzip", False),
        ("lz4", False),
        ("tar", True),
        ("xz", False),
        ("zip", True),
        ("zstd", False),
    ],
)
def test_extractor(
    compression_format,
    is_archive,
    bz2_file,
    gz_file,
    lz4_file,
    seven_zip_file,
    tar_file,
    xz_file,
    zip_file,
    zstd_file,
    tmp_path,
    text_file,
):
    input_paths = {
        "7z": seven_zip_file,
        "bz2": bz2_file,
        "gzip": gz_file,
        "lz4": lz4_file,
        "tar": tar_file,
        "xz": xz_file,
        "zip": zip_file,
        "zstd": zstd_file,
    }
    input_path = input_paths[compression_format]
    if input_path is None:
        reason = f"for '{compression_format}' compression_format, "
        if compression_format == "7z":
            reason += require_py7zr.kwargs["reason"]
        elif compression_format == "lz4":
            reason += require_lz4.kwargs["reason"]
        elif compression_format == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason)
    extractor_format = Extractor.infer_extractor_format(input_path)
    assert extractor_format is not None
    output_path = tmp_path / ("extracted" if is_archive else "extracted.txt")
    Extractor.extract(input_path, output_path, extractor_format)
    if is_archive:
        assert output_path.is_dir()
        for file_path in output_path.iterdir():
            assert file_path.name == text_file.name
        extracted_file_content = file_path.read_text(encoding="utf-8")
    else:
        extracted_file_content = output_path.read_text(encoding="utf-8")
    expected_file_content = text_file.read_text(encoding="utf-8")
    assert extracted_file_content == expected_file_content


@pytest.fixture
def tar_file_with_dot_dot(tmp_path, text_file):
    import tarfile

    directory = tmp_path / "data_dot_dot"
    directory.mkdir()
    path = directory / "tar_file_with_dot_dot.tar"
    with tarfile.TarFile(path, "w") as f:
        f.add(text_file, arcname=os.path.join("..", text_file.name))
    return path


@pytest.fixture
def tar_file_with_sym_link(tmp_path):
    import tarfile

    directory = tmp_path / "data_sym_link"
    directory.mkdir()
    path = directory / "tar_file_with_sym_link.tar"
    os.symlink("..", directory / "subdir", target_is_directory=True)
    with tarfile.TarFile(path, "w") as f:
        f.add(str(directory / "subdir"), arcname="subdir")  # str required by os.readlink on Windows and Python < 3.8
    return path


@pytest.mark.parametrize(
    "insecure_tar_file, error_log",
    [("tar_file_with_dot_dot", "illegal path"), ("tar_file_with_sym_link", "Symlink")],
)
def test_tar_extract_insecure_files(
    insecure_tar_file, error_log, tar_file_with_dot_dot, tar_file_with_sym_link, tmp_path, caplog
):
    insecure_tar_files = {
        "tar_file_with_dot_dot": tar_file_with_dot_dot,
        "tar_file_with_sym_link": tar_file_with_sym_link,
    }
    insecure_tar_file_path = insecure_tar_files[insecure_tar_file]
    output_path = tmp_path / "extracted"
    TarExtractor.extract(insecure_tar_file_path, output_path)
    assert caplog.text
    for record in caplog.records:
        assert record.levelname == "ERROR"
        assert error_log in record.msg


def test_is_zipfile_false_positive(tmpdir):
    # We should have less false positives than zipfile.is_zipfile
    # We do that by checking only the magic number
    not_a_zip_file = tmpdir / "not_a_zip_file"
    # From: https://github.com/python/cpython/pull/5053
    data = (
        b"\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x01\x00\x00"
        b"\x00\x02\x08\x06\x00\x00\x00\x99\x81\xb6'\x00\x00\x00\x15I"
        b"DATx\x01\x01\n\x00\xf5\xff\x00PK\x05\x06\x00PK\x06\x06\x07"
        b"\xac\x01N\xc6|a\r\x00\x00\x00\x00IEND\xaeB`\x82"
    )
    with not_a_zip_file.open("wb") as f:
        f.write(data)
    assert zipfile.is_zipfile(str(not_a_zip_file))  # is a false positive for `zipfile`
    assert not ZipExtractor.is_extractable(not_a_zip_file)  # but we're right
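# Background note (an assumption about the implementation, not from this file):
# `zipfile.is_zipfile` looks for the end-of-central-directory signature (b"PK\x05\x06")
# anywhere in the file, so a PNG that merely contains those bytes, like the payload above,
# is a false positive; checking that the file *starts* with a ZIP magic number such as
# b"PK\x03\x04" avoids this class of mistake.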
| 206 | import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
_lowerCamelCase : Union[str, Any] = logging.get_logger(__name__)
_lowerCamelCase : str = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
_lowerCamelCase : int = {
'''vocab_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json''',
},
'''merges_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json''',
},
}
_lowerCamelCase : List[Any] = {
'''allenai/led-base-16384''': 1_63_84,
}
class lowercase ( a ):
lowercase__ : Tuple = VOCAB_FILES_NAMES
lowercase__ : Any = PRETRAINED_VOCAB_FILES_MAP
lowercase__ : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase__ : Optional[Any] = LEDTokenizer
lowercase__ : str = ["""input_ids""", """attention_mask"""]
def __init__( self : Union[str, Any] , _UpperCamelCase : Tuple=None , _UpperCamelCase : str=None , _UpperCamelCase : Tuple=None , _UpperCamelCase : List[str]="replace" , _UpperCamelCase : str="<s>" , _UpperCamelCase : List[Any]="</s>" , _UpperCamelCase : List[Any]="</s>" , _UpperCamelCase : List[str]="<s>" , _UpperCamelCase : Tuple="<unk>" , _UpperCamelCase : List[Any]="<pad>" , _UpperCamelCase : Tuple="<mask>" , _UpperCamelCase : List[str]=False , _UpperCamelCase : List[Any]=True , **_UpperCamelCase : Optional[Any] , ) -> Tuple:
'''simple docstring'''
super().__init__(
_UpperCamelCase , _UpperCamelCase , tokenizer_file=_UpperCamelCase , errors=_UpperCamelCase , bos_token=_UpperCamelCase , eos_token=_UpperCamelCase , sep_token=_UpperCamelCase , cls_token=_UpperCamelCase , unk_token=_UpperCamelCase , pad_token=_UpperCamelCase , mask_token=_UpperCamelCase , add_prefix_space=_UpperCamelCase , trim_offsets=_UpperCamelCase , **_UpperCamelCase , )
SCREAMING_SNAKE_CASE = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space" , _UpperCamelCase ) != add_prefix_space:
SCREAMING_SNAKE_CASE = getattr(_UpperCamelCase , pre_tok_state.pop("type" ) )
SCREAMING_SNAKE_CASE = add_prefix_space
SCREAMING_SNAKE_CASE = pre_tok_class(**_UpperCamelCase )
SCREAMING_SNAKE_CASE = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
SCREAMING_SNAKE_CASE = "post_processor"
SCREAMING_SNAKE_CASE = getattr(self.backend_tokenizer , _UpperCamelCase , _UpperCamelCase )
if tokenizer_component_instance:
SCREAMING_SNAKE_CASE = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
SCREAMING_SNAKE_CASE = tuple(state["sep"] )
if "cls" in state:
SCREAMING_SNAKE_CASE = tuple(state["cls"] )
SCREAMING_SNAKE_CASE = False
if state.get("add_prefix_space" , _UpperCamelCase ) != add_prefix_space:
SCREAMING_SNAKE_CASE = add_prefix_space
SCREAMING_SNAKE_CASE = True
if state.get("trim_offsets" , _UpperCamelCase ) != trim_offsets:
SCREAMING_SNAKE_CASE = trim_offsets
SCREAMING_SNAKE_CASE = True
if changes_to_apply:
SCREAMING_SNAKE_CASE = getattr(_UpperCamelCase , state.pop("type" ) )
SCREAMING_SNAKE_CASE = component_class(**_UpperCamelCase )
setattr(self.backend_tokenizer , _UpperCamelCase , _UpperCamelCase )
@property
# Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
def __snake_case( self : int ) -> str:
'''simple docstring'''
if self._mask_token is None:
if self.verbose:
logger.error("Using mask_token, but it is not set yet." )
return None
return str(self._mask_token )
@mask_token.setter
def __snake_case( self : Optional[int] , _UpperCamelCase : Optional[int] ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE = AddedToken(_UpperCamelCase , lstrip=_UpperCamelCase , rstrip=_UpperCamelCase ) if isinstance(_UpperCamelCase , _UpperCamelCase ) else value
SCREAMING_SNAKE_CASE = value
def __snake_case( self : List[Any] , *_UpperCamelCase : Optional[int] , **_UpperCamelCase : Union[str, Any] ) -> BatchEncoding:
'''simple docstring'''
SCREAMING_SNAKE_CASE = kwargs.get("is_split_into_words" , _UpperCamelCase )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
"to use it with pretokenized inputs." )
return super()._batch_encode_plus(*_UpperCamelCase , **_UpperCamelCase )
def __snake_case( self : int , *_UpperCamelCase : Dict , **_UpperCamelCase : Tuple ) -> BatchEncoding:
'''simple docstring'''
SCREAMING_SNAKE_CASE = kwargs.get("is_split_into_words" , _UpperCamelCase )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
"to use it with pretokenized inputs." )
return super()._encode_plus(*_UpperCamelCase , **_UpperCamelCase )
def __snake_case( self : List[Any] , _UpperCamelCase : str , _UpperCamelCase : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self._tokenizer.model.save(_UpperCamelCase , name=_UpperCamelCase )
return tuple(_UpperCamelCase )
def __snake_case( self : List[Any] , _UpperCamelCase : List[Any] , _UpperCamelCase : int=None ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def __snake_case( self : Dict , _UpperCamelCase : List[int] , _UpperCamelCase : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = [self.sep_token_id]
SCREAMING_SNAKE_CASE = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __snake_case( self : Optional[Any] , _UpperCamelCase : Union[Dict[str, EncodedInput], BatchEncoding] , _UpperCamelCase : Optional[int] = None , _UpperCamelCase : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , _UpperCamelCase : Optional[int] = None , _UpperCamelCase : Optional[bool] = None , ) -> dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE = super()._pad(
encoded_inputs=_UpperCamelCase , max_length=_UpperCamelCase , padding_strategy=_UpperCamelCase , pad_to_multiple_of=_UpperCamelCase , return_attention_mask=_UpperCamelCase , )
# Load from model defaults
if return_attention_mask is None:
SCREAMING_SNAKE_CASE = "attention_mask" in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
SCREAMING_SNAKE_CASE = encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` need to have the same length as other (sequential) inputs.
SCREAMING_SNAKE_CASE = len(encoded_inputs["global_attention_mask"] ) != len(_UpperCamelCase )
if needs_to_be_padded:
SCREAMING_SNAKE_CASE = len(_UpperCamelCase ) - len(encoded_inputs["global_attention_mask"] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
SCREAMING_SNAKE_CASE = (
encoded_inputs["global_attention_mask"] + [-1] * difference
)
elif self.padding_side == "left":
SCREAMING_SNAKE_CASE = [-1] * difference + encoded_inputs[
"global_attention_mask"
]
else:
raise ValueError("Invalid padding strategy:" + str(self.padding_side ) )
return encoded_inputs
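        # Note: in `global_attention_mask`, 0 means local attention and 1 means global
        # attention, so padded positions are filled with -1 ("do not attend") rather than 0;
        # e.g. [1, 0, 0] right-padded to length 5 becomes [1, 0, 0, -1, -1].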
| 206 | 1 |
"""simple docstring"""
def __lowercase ( snake_case_ : int ) ->int:
'''simple docstring'''
__A : Optional[int] = [1]
__A , __A , __A : Optional[Any] = 0, 0, 0
__A : Optional[int] = ugly_nums[ia] * 2
__A : Tuple = ugly_nums[ia] * 3
__A : str = ugly_nums[ia] * 5
for _ in range(1 ,snake_case_ ):
__A : Dict = min(snake_case_ ,snake_case_ ,snake_case_ )
ugly_nums.append(snake_case_ )
if next_num == next_a:
ia += 1
__A : List[Any] = ugly_nums[ia] * 2
if next_num == next_a:
ia += 1
__A : int = ugly_nums[ia] * 3
if next_num == next_a:
ia += 1
__A : Optional[Any] = ugly_nums[ia] * 5
return ugly_nums[-1]
if __name__ == "__main__":
from doctest import testmod
testmod(verbose=True)
print(f'''{ugly_numbers(200) = }''')
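# Note: the three pointers i2/i3/i5 make this O(n); each ugly number is produced exactly once
# as the minimum of the three candidate multiples, rather than by trial division.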
| 179 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch

from ..models.speecht5 import SpeechT5ForTextToSpeech, SpeechT5HifiGan, SpeechT5Processor
from ..utils import is_datasets_available
from .base import PipelineTool


if is_datasets_available():
    from datasets import load_dataset


class TextToSpeechTool(PipelineTool):
    default_checkpoint = "microsoft/speecht5_tts"
    description = (
        "This is a tool that reads an English text out loud. It takes an input named `text` which should contain the "
        "text to read (in English) and returns a waveform object containing the sound."
    )
    name = "text_reader"
    pre_processor_class = SpeechT5Processor
    model_class = SpeechT5ForTextToSpeech
    post_processor_class = SpeechT5HifiGan

    inputs = ["text"]
    outputs = ["audio"]

    def setup(self):
        if self.post_processor is None:
            self.post_processor = "microsoft/speecht5_hifigan"
        super().setup()

    def encode(self, text, speaker_embeddings=None):
        inputs = self.pre_processor(text=text, return_tensors="pt", truncation=True)

        if speaker_embeddings is None:
            if not is_datasets_available():
                raise ImportError("Datasets needs to be installed if not passing speaker embeddings.")

            embeddings_dataset = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation")
            speaker_embeddings = torch.tensor(embeddings_dataset[7305]["xvector"]).unsqueeze(0)

        return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings}

    def forward(self, inputs):
        with torch.no_grad():
            return self.model.generate_speech(**inputs)

    def decode(self, outputs):
        with torch.no_grad():
            return self.post_processor(outputs).cpu().detach()
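# Illustrative usage (an assumption about the surrounding agents API, not from this file):
# the tool is normally obtained via `load_tool` rather than instantiated directly.
#   from transformers import load_tool
#
#   tts = load_tool("text-to-speech")
#   audio = tts("Hello world")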
| 179 | 1 |
from __future__ import annotations
from collections import namedtuple
def electric_power(voltage: float, current: float, power: float) -> tuple:
    """
    Calculates any one of the three fundamental electrical quantities
    (voltage, current, power) from the other two; the unknown one is passed as 0.

    >>> electric_power(voltage=0, current=2, power=5)
    result(name='voltage', value=2.5)
    >>> electric_power(voltage=2, current=2, power=0)
    result(name='power', value=4.0)
    >>> electric_power(voltage=-2, current=3, power=0)
    result(name='power', value=6.0)
    """
    result = namedtuple("result", "name value")
    if (voltage, current, power).count(0) != 1:
        raise ValueError("Only one argument must be 0")
    elif power < 0:
        raise ValueError(
            "Power cannot be negative in any electrical/electronics system"
        )
    elif voltage == 0:
        return result("voltage", power / current)
    elif current == 0:
        return result("current", power / voltage)
    elif power == 0:
        return result("power", float(round(abs(voltage * current), 2)))
    else:
        raise ValueError("Exactly one argument must be 0")
if __name__ == "__main__":
import doctest
doctest.testmod()
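# Note: with exactly one of the three quantities passed as 0, the function solves P = V * I
# for the missing one, e.g. electric_power(voltage=0, current=2, power=5) returns
# result(name='voltage', value=2.5).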
| 369 |
"""simple docstring"""
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def snake_case ( A__ ,A__ ):
assert isinstance(A__ ,A__ )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" ,[False, True] )
def snake_case ( A__ ,A__ ,A__ ):
UpperCAmelCase_ : Optional[int] = tmp_path / "cache"
UpperCAmelCase_ : Union[str, Any] = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
UpperCAmelCase_ : Union[str, Any] = ParquetDatasetReader(A__ ,cache_dir=A__ ,keep_in_memory=A__ ).read()
_check_parquet_dataset(A__ ,A__ )
@pytest.mark.parametrize(
"features" ,[
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
] ,)
def snake_case ( A__ ,A__ ,A__ ):
UpperCAmelCase_ : str = tmp_path / "cache"
UpperCAmelCase_ : int = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
UpperCAmelCase_ : Any = features.copy() if features else default_expected_features
UpperCAmelCase_ : int = (
Features({feature: Value(A__ ) for feature, dtype in features.items()} ) if features is not None else None
)
UpperCAmelCase_ : List[Any] = ParquetDatasetReader(A__ ,features=A__ ,cache_dir=A__ ).read()
_check_parquet_dataset(A__ ,A__ )
@pytest.mark.parametrize("split" ,[None, NamedSplit("train" ), "train", "test"] )
def snake_case ( A__ ,A__ ,A__ ):
UpperCAmelCase_ : List[Any] = tmp_path / "cache"
UpperCAmelCase_ : str = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
UpperCAmelCase_ : int = ParquetDatasetReader(A__ ,cache_dir=A__ ,split=A__ ).read()
_check_parquet_dataset(A__ ,A__ )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize("path_type" ,[str, list] )
def snake_case ( A__ ,A__ ,A__ ):
if issubclass(A__ ,A__ ):
UpperCAmelCase_ : int = parquet_path
elif issubclass(A__ ,A__ ):
UpperCAmelCase_ : Any = [parquet_path]
UpperCAmelCase_ : Dict = tmp_path / "cache"
UpperCAmelCase_ : List[str] = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
UpperCAmelCase_ : Tuple = ParquetDatasetReader(A__ ,cache_dir=A__ ).read()
_check_parquet_dataset(A__ ,A__ )
def snake_case ( A__ ,A__ ,A__=("train",) ):
assert isinstance(A__ ,A__ )
for split in splits:
UpperCAmelCase_ : Any = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" ,[False, True] )
def snake_case ( A__ ,A__ ,A__ ):
UpperCAmelCase_ : Union[str, Any] = tmp_path / "cache"
UpperCAmelCase_ : int = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
UpperCAmelCase_ : Dict = ParquetDatasetReader(
{"train": parquet_path} ,cache_dir=A__ ,keep_in_memory=A__ ).read()
_check_parquet_datasetdict(A__ ,A__ )
@pytest.mark.parametrize(
"features" ,[
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
] ,)
def snake_case ( A__ ,A__ ,A__ ):
UpperCAmelCase_ : Optional[int] = tmp_path / "cache"
UpperCAmelCase_ : Optional[int] = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
UpperCAmelCase_ : int = features.copy() if features else default_expected_features
UpperCAmelCase_ : int = (
Features({feature: Value(A__ ) for feature, dtype in features.items()} ) if features is not None else None
)
UpperCAmelCase_ : Any = ParquetDatasetReader({"train": parquet_path} ,features=A__ ,cache_dir=A__ ).read()
_check_parquet_datasetdict(A__ ,A__ )
@pytest.mark.parametrize("split" ,[None, NamedSplit("train" ), "train", "test"] )
def snake_case ( A__ ,A__ ,A__ ):
if split:
UpperCAmelCase_ : Optional[Any] = {split: parquet_path}
else:
UpperCAmelCase_ : Union[str, Any] = "train"
UpperCAmelCase_ : Dict = {"train": parquet_path, "test": parquet_path}
UpperCAmelCase_ : Union[str, Any] = tmp_path / "cache"
UpperCAmelCase_ : Any = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
UpperCAmelCase_ : str = ParquetDatasetReader(A__ ,cache_dir=A__ ).read()
_check_parquet_datasetdict(A__ ,A__ ,splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def snake_case ( A__ ,A__ ):
UpperCAmelCase_ : str = ParquetDatasetWriter(A__ ,tmp_path / "foo.parquet" )
assert writer.write() > 0
UpperCAmelCase_ : List[str] = pq.ParquetFile(tmp_path / "foo.parquet" )
UpperCAmelCase_ : Optional[Any] = pf.read()
assert dataset.data.table == output_table
def snake_case ( A__ ,A__ ):
UpperCAmelCase_ : List[str] = str(shared_datadir / "test_image_rgb.jpg" )
UpperCAmelCase_ : Optional[Any] = {"image": [image_path]}
UpperCAmelCase_ : Optional[Any] = Features({"image": Image()} )
UpperCAmelCase_ : List[Any] = Dataset.from_dict(A__ ,features=A__ )
UpperCAmelCase_ : str = ParquetDatasetWriter(A__ ,tmp_path / "foo.parquet" )
assert writer.write() > 0
UpperCAmelCase_ : Tuple = Dataset.from_parquet(str(tmp_path / "foo.parquet" ) )
assert dataset.features == reloaded_dataset.features
UpperCAmelCase_ : Any = ParquetDatasetReader(str(tmp_path / "foo.parquet" ) ,streaming=A__ ).read()
assert dataset.features == reloaded_iterable_dataset.features
@pytest.mark.parametrize(
"feature, expected" ,[
(Features({"foo": Value("int32" )} ), None),
(Features({"image": Image(), "foo": Value("int32" )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
(Features({"nested": Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
] ,)
def snake_case ( A__ ,A__ ):
assert get_writer_batch_size(A__ ) == expected
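# Note: `get_writer_batch_size` returns a smaller Parquet row-group size for media-heavy
# features (config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS / ..._AUDIO_DATASETS, as
# parametrized above) so that a single row group of images or audio stays cheap to load;
# plain scalar features fall back to the default (None).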
| 253 | 0 |
from typing import Callable, Optional

from .. import Features
from ..packaged_modules.generator.generator import Generator
from .abc import AbstractDatasetInputStream


class GeneratorDatasetInputStream(AbstractDatasetInputStream):
    def __init__(
        self,
        generator: Callable,
        features: Optional[Features] = None,
        cache_dir: Optional[str] = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        gen_kwargs: Optional[dict] = None,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        self.builder = Generator(
            cache_dir=cache_dir,
            features=features,
            generator=generator,
            gen_kwargs=gen_kwargs,
            **kwargs,
        )

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split="train")
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split="train", verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset
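# Illustrative sketch (not part of the original file; the generator below is a made-up
# example): this reader usually runs behind the public `Dataset.from_generator` API.
#   from datasets import Dataset
#
#   def gen():
#       for i in range(3):
#           yield {"idx": i}
#
#   ds = Dataset.from_generator(gen)  # builds a GeneratorDatasetInputStream internally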
| 239 | '''simple docstring'''
import math


def proth(number: int) -> int:
    """
    Returns the number-th Proth number (1-indexed).

    >>> proth(1)
    3
    >>> proth(6)
    25
    """
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)

    if number < 1:
        msg = f"Input value of [number={number}] must be > 0"
        raise ValueError(msg)
    elif number == 1:
        return 3
    elif number == 2:
        return 5
    else:
        # enough doubling blocks to reach the requested index
        block_index = int(math.log(number // 3, 2)) + 2

        proth_list = [3, 5]
        proth_index = 2
        increment = 3
        for block in range(1, block_index):
            for _ in range(increment):
                proth_list.append(2 ** (block + 1) + proth_list[proth_index - 1])
                proth_index += 1
            increment *= 2

    return proth_list[number - 1]


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    for number in range(11):
        value = 0
        try:
            value = proth(number)
        except ValueError:
            print(f"ValueError: there is no {number}th Proth number")
            continue
        print(f"The {number}th Proth number: {value}")
| 239 | 1 |
"""simple docstring"""
import math
import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute
def __lowercase ( _a = 3 ):
if isinstance(_a , _a ):
raise TypeError('''number of qubits must be a integer.''' )
if number_of_qubits <= 0:
raise ValueError('''number of qubits must be > 0.''' )
if math.floor(_a ) != number_of_qubits:
raise ValueError('''number of qubits must be exact integer.''' )
if number_of_qubits > 10:
raise ValueError('''number of qubits too large to simulate(>10).''' )
snake_case_ : Tuple = QuantumRegister(_a , '''qr''' )
snake_case_ : Optional[Any] = ClassicalRegister(_a , '''cr''' )
snake_case_ : Any = QuantumCircuit(_a , _a )
snake_case_ : int = number_of_qubits
for i in range(_a ):
quantum_circuit.h(number_of_qubits - i - 1 )
counter -= 1
for j in range(_a ):
quantum_circuit.cp(np.pi / 2 ** (counter - j) , _a , _a )
for k in range(number_of_qubits // 2 ):
quantum_circuit.swap(_a , number_of_qubits - k - 1 )
# measure all the qubits
quantum_circuit.measure(_a , _a )
# simulate with 10000 shots
snake_case_ : Any = Aer.get_backend('''qasm_simulator''' )
snake_case_ : Optional[int] = execute(_a , _a , shots=10_000 )
return job.result().get_counts(_a )
if __name__ == "__main__":
print(
f'Total count for quantum fourier transform state is: \
{quantum_fourier_transform(3)}'
)
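# Note: applied to the all-zeros input state, the QFT yields a uniform superposition, so the
# 10_000 shots should spread roughly evenly over all 2**n measurement outcomes (about 1250
# counts per bitstring for the default n = 3).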
| 155 |
"""simple docstring"""
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class _UpperCAmelCase :
def __init__( self : int , lowercase_ : int , lowercase_ : int=13 , lowercase_ : Optional[Any]=7 , lowercase_ : Dict=True , lowercase_ : str=True , lowercase_ : Optional[int]=False , lowercase_ : Union[str, Any]=True , lowercase_ : List[str]=99 , lowercase_ : Union[str, Any]=32 , lowercase_ : Dict=5 , lowercase_ : Dict=4 , lowercase_ : Optional[int]=37 , lowercase_ : str="gelu" , lowercase_ : Optional[int]=0.1 , lowercase_ : Any=0.1 , lowercase_ : Union[str, Any]=512 , lowercase_ : int=16 , lowercase_ : List[str]=2 , lowercase_ : List[str]=0.02 , lowercase_ : Union[str, Any]=3 , lowercase_ : Union[str, Any]=4 , lowercase_ : List[str]=None , ):
snake_case_ : Optional[int] = parent
snake_case_ : Optional[Any] = batch_size
snake_case_ : Union[str, Any] = seq_length
snake_case_ : Union[str, Any] = is_training
snake_case_ : List[str] = use_input_mask
snake_case_ : Optional[Any] = use_token_type_ids
snake_case_ : str = use_labels
snake_case_ : List[str] = vocab_size
snake_case_ : Optional[Any] = hidden_size
snake_case_ : Dict = num_hidden_layers
snake_case_ : Optional[Any] = num_attention_heads
snake_case_ : Dict = intermediate_size
snake_case_ : str = hidden_act
snake_case_ : Tuple = hidden_dropout_prob
snake_case_ : Optional[int] = attention_probs_dropout_prob
snake_case_ : Union[str, Any] = max_position_embeddings
snake_case_ : Dict = type_vocab_size
snake_case_ : int = type_sequence_label_size
snake_case_ : Tuple = initializer_range
snake_case_ : Any = num_labels
snake_case_ : Dict = num_choices
snake_case_ : str = scope
def _snake_case ( self : Dict ):
snake_case_ : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case_ : List[str] = None
if self.use_input_mask:
snake_case_ : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
snake_case_ : Any = None
if self.use_token_type_ids:
snake_case_ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
snake_case_ : Tuple = None
snake_case_ : str = None
snake_case_ : Any = None
if self.use_labels:
snake_case_ : str = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case_ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
snake_case_ : Optional[Any] = ids_tensor([self.batch_size] , self.num_choices )
snake_case_ : str = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _snake_case ( self : List[str] ):
return OpenLlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowercase_ , initializer_range=self.initializer_range , use_stable_embedding=lowercase_ , )
def _snake_case ( self : Any , lowercase_ : Tuple , lowercase_ : Tuple , lowercase_ : Optional[Any] , lowercase_ : Union[str, Any] , lowercase_ : int , lowercase_ : Dict , lowercase_ : Any ):
snake_case_ : List[Any] = OpenLlamaModel(config=lowercase_ )
model.to(lowercase_ )
model.eval()
snake_case_ : Any = model(lowercase_ , attention_mask=lowercase_ )
snake_case_ : List[Any] = model(lowercase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _snake_case ( self : Union[str, Any] , lowercase_ : List[Any] , lowercase_ : List[Any] , lowercase_ : Dict , lowercase_ : Union[str, Any] , lowercase_ : int , lowercase_ : Tuple , lowercase_ : Any , lowercase_ : Union[str, Any] , lowercase_ : Dict , ):
snake_case_ : List[str] = True
snake_case_ : Tuple = OpenLlamaModel(lowercase_ )
model.to(lowercase_ )
model.eval()
snake_case_ : Optional[Any] = model(
lowercase_ , attention_mask=lowercase_ , encoder_hidden_states=lowercase_ , encoder_attention_mask=lowercase_ , )
snake_case_ : str = model(
lowercase_ , attention_mask=lowercase_ , encoder_hidden_states=lowercase_ , )
snake_case_ : Any = model(lowercase_ , attention_mask=lowercase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _snake_case ( self : Dict , lowercase_ : Tuple , lowercase_ : int , lowercase_ : List[Any] , lowercase_ : int , lowercase_ : List[str] , lowercase_ : Any , lowercase_ : Optional[Any] , lowercase_ : int , lowercase_ : List[str] , ):
snake_case_ : Optional[int] = OpenLlamaForCausalLM(config=lowercase_ )
model.to(lowercase_ )
model.eval()
snake_case_ : str = model(lowercase_ , attention_mask=lowercase_ , labels=lowercase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _snake_case ( self : int , lowercase_ : Any , lowercase_ : Union[str, Any] , lowercase_ : Optional[Any] , lowercase_ : str , lowercase_ : Union[str, Any] , lowercase_ : List[Any] , lowercase_ : str , lowercase_ : Optional[Any] , lowercase_ : str , ):
snake_case_ : int = True
snake_case_ : Optional[int] = True
snake_case_ : List[Any] = OpenLlamaForCausalLM(config=lowercase_ )
model.to(lowercase_ )
model.eval()
# first forward pass
snake_case_ : List[Any] = model(
lowercase_ , attention_mask=lowercase_ , encoder_hidden_states=lowercase_ , encoder_attention_mask=lowercase_ , use_cache=lowercase_ , )
snake_case_ : int = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
snake_case_ : str = ids_tensor((self.batch_size, 3) , config.vocab_size )
snake_case_ : Union[str, Any] = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
snake_case_ : str = torch.cat([input_ids, next_tokens] , dim=-1 )
snake_case_ : Optional[int] = torch.cat([input_mask, next_mask] , dim=-1 )
snake_case_ : int = model(
lowercase_ , attention_mask=lowercase_ , encoder_hidden_states=lowercase_ , encoder_attention_mask=lowercase_ , output_hidden_states=lowercase_ , )['''hidden_states'''][0]
snake_case_ : Optional[int] = model(
lowercase_ , attention_mask=lowercase_ , encoder_hidden_states=lowercase_ , encoder_attention_mask=lowercase_ , past_key_values=lowercase_ , output_hidden_states=lowercase_ , )['''hidden_states'''][0]
# select random slice
snake_case_ : Any = ids_tensor((1,) , output_from_past.shape[-1] ).item()
snake_case_ : Dict = output_from_no_past[:, -3:, random_slice_idx].detach()
snake_case_ : str = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowercase_ , lowercase_ , atol=1E-3 ) )
def _snake_case ( self : List[str] ):
snake_case_ : Optional[Any] = self.prepare_config_and_inputs()
(
(
snake_case_
), (
snake_case_
), (
snake_case_
), (
snake_case_
), (
snake_case_
), (
snake_case_
), (
snake_case_
),
) : List[str] = config_and_inputs
snake_case_ : Union[str, Any] = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class _UpperCAmelCase ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase):
_lowerCAmelCase : Optional[Any] = (
(OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
)
_lowerCAmelCase : Any = (OpenLlamaForCausalLM,) if is_torch_available() else ()
_lowerCAmelCase : Union[str, Any] = (
{
"""feature-extraction""": OpenLlamaModel,
"""text-classification""": OpenLlamaForSequenceClassification,
"""text-generation""": OpenLlamaForCausalLM,
"""zero-shot""": OpenLlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
_lowerCAmelCase : List[str] = False
_lowerCAmelCase : Union[str, Any] = False
def _snake_case ( self : List[Any] ):
snake_case_ : Any = OpenLlamaModelTester(self )
snake_case_ : Dict = ConfigTester(self , config_class=lowercase_ , hidden_size=37 )
def _snake_case ( self : Union[str, Any] ):
self.config_tester.run_common_tests()
def _snake_case ( self : List[Any] ):
snake_case_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase_ )
def _snake_case ( self : List[Any] ):
snake_case_ : Tuple = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
snake_case_ : Tuple = type
self.model_tester.create_and_check_model(*lowercase_ )
    def test_open_llama_sequence_classification_model( self : Optional[int] ):
        config , input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict['''input_ids''']
        attention_mask = input_ids.ne(1 ).to(torch_device )
        sequence_labels = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
        model = OpenLlamaForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=attention_mask , labels=sequence_labels )
        self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
    def test_open_llama_sequence_classification_model_for_single_label( self : Union[str, Any] ):
        config , input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = '''single_label_classification'''
        input_ids = input_dict['''input_ids''']
        attention_mask = input_ids.ne(1 ).to(torch_device )
        sequence_labels = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
        model = OpenLlamaForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=attention_mask , labels=sequence_labels )
        self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
    def test_open_llama_sequence_classification_model_for_multi_label( self : Optional[Any] ):
        config , input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = '''multi_label_classification'''
        input_ids = input_dict['''input_ids''']
        attention_mask = input_ids.ne(1 ).to(torch_device )
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
        model = OpenLlamaForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=attention_mask , labels=sequence_labels )
        self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip('''Open-Llama buffers include complex numbers, which breaks this test''' )
def _snake_case ( self : List[str] ):
pass
@parameterized.expand([('''linear''',), ('''dynamic''',)] )
    def test_model_rope_scaling( self : Tuple , scaling_type : Dict ):
        config , _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10] , config.vocab_size )
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
        set_seed(42 )  # Fixed seed at init time so the two models get the same random weights
        original_model = OpenLlamaModel(config )
        original_model.to(torch_device )
        original_model.eval()
        original_short_output = original_model(short_input ).last_hidden_state
        original_long_output = original_model(long_input ).last_hidden_state
        set_seed(42 )  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {'''type''': scaling_type, '''factor''': 10.0}
        scaled_model = OpenLlamaModel(config )
        scaled_model.to(torch_device )
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input ).last_hidden_state
        scaled_long_output = scaled_model(long_input ).last_hidden_state
        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output , scaled_short_output , atol=1E-5 ) )
        else:
            self.assertFalse(torch.allclose(original_short_output , scaled_short_output , atol=1E-5 ) )
        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output , scaled_long_output , atol=1E-5 ) )
| 155 | 1 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class A__ ( unittest.TestCase ):
@property
    def dummy_uncond_unet( self :Dict ) -> Optional[Any]:
        '''simple docstring'''
        torch.manual_seed(0 )
        model = UNetaDModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("""DownBlock2D""", """AttnDownBlock2D""") , up_block_types=("""AttnUpBlock2D""", """UpBlock2D""") , )
        return model
    def test_inference( self :Tuple ) -> Union[str, Any]:
        '''simple docstring'''
        unet = self.dummy_uncond_unet
        scheduler = ScoreSdeVeScheduler()
        sde_ve = ScoreSdeVePipeline(unet=unet , scheduler=scheduler )
        sde_ve.to(torch_device )
        sde_ve.set_progress_bar_config(disable=None )
        generator = torch.manual_seed(0 )
        image = sde_ve(num_inference_steps=2 , output_type="""numpy""" , generator=generator ).images
        generator = torch.manual_seed(0 )
        image_from_tuple = sde_ve(num_inference_steps=2 , output_type="""numpy""" , generator=generator , return_dict=False )[
            0
        ]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch
class A__ ( unittest.TestCase ):
    def test_inference( self :List[str] ) -> Tuple:
        '''simple docstring'''
        model_id = """google/ncsnpp-church-256"""
        model = UNetaDModel.from_pretrained(model_id )
        scheduler = ScoreSdeVeScheduler.from_pretrained(model_id )
        sde_ve = ScoreSdeVePipeline(unet=model , scheduler=scheduler )
        sde_ve.to(torch_device )
        sde_ve.set_progress_bar_config(disable=None )
        generator = torch.manual_seed(0 )
        image = sde_ve(num_inference_steps=10 , output_type="""numpy""" , generator=generator ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
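# --- Editorial addition: hedged usage sketch, not part of the original test file.
# It drives the same pipeline end to end using only the public diffusers API and
# the "google/ncsnpp-church-256" checkpoint already referenced in the slow test.
if __name__ == "__main__":
    _demo_pipe = ScoreSdeVePipeline.from_pretrained("google/ncsnpp-church-256")
    _demo_pipe.to(torch_device)
    _demo_out = _demo_pipe(num_inference_steps=10, output_type="numpy", generator=torch.manual_seed(0))
    print(_demo_out.images.shape)  # expected: (1, 256, 256, 3)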
| 276 |
'''simple docstring'''
from typing import Dict, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import flip_channel_order, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
A__: Union[str, Any] = logging.get_logger(__name__)
def normalize_box( box , width , height ) -> int:
    return [
        int(1000 * (box[0] / width) ),
        int(1000 * (box[1] / height) ),
        int(1000 * (box[2] / width) ),
        int(1000 * (box[3] / height) ),
    ]
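# Editorial note (worked example with made-up numbers): for a 200x100 image, the
# pixel box (20, 10, 100, 50) maps to (100, 100, 500, 500) on the 0-1000 grid
# used by LayoutLM-style models: int(1000 * 20 / 200) == 100, and so on.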
def apply_tesseract( image : np.ndarray , lang : Optional[str] , tesseract_config : Optional[str] = None ) -> Optional[int]:
    tesseract_config = tesseract_config if tesseract_config is not None else """"""
    # apply OCR
    pil_image = to_pil_image(image )
    image_width , image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image , lang=lang , output_type="""dict""" , config=tesseract_config )
    words , left , top , width , height = data["""text"""], data["""left"""], data["""top"""], data["""width"""], data["""height"""]
    # filter empty words and corresponding coordinates
    irrelevant_indices = [idx for idx, word in enumerate(words ) if not word.strip()]
    words = [word for idx, word in enumerate(words ) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left ) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top ) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width ) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height ) if idx not in irrelevant_indices]
    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = []
    for x, y, w, h in zip(left , top , width , height ):
        actual_box = [x, y, x + w, y + h]
        actual_boxes.append(actual_box )
    # finally, normalize the bounding boxes
    normalized_boxes = []
    for box in actual_boxes:
        normalized_boxes.append(normalize_box(box , image_width , image_height ) )
    assert len(words ) == len(normalized_boxes ), "Not as many words as there are bounding boxes"
    return words, normalized_boxes
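# Editorial note: pytesseract's output_type="dict" result is column-oriented
# (parallel lists per field), so the filtering above drops whitespace-only
# detections from every column in lockstep, keeping words and boxes aligned.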
class LayoutLMvaImageProcessor ( BaseImageProcessor ):
    model_input_names = ["pixel_values"]
    def __init__( self :Tuple , do_resize : bool = True , size : Dict[str, int] = None , resample : PILImageResampling = PILImageResampling.BILINEAR , apply_ocr : bool = True , ocr_lang : Optional[str] = None , tesseract_config : Optional[str] = "" , **kwargs :Tuple , ) -> None:
        '''simple docstring'''
        super().__init__(**kwargs )
        size = size if size is not None else {"""height""": 224, """width""": 224}
        size = get_size_dict(size )
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.apply_ocr = apply_ocr
        self.ocr_lang = ocr_lang
        self.tesseract_config = tesseract_config
    def resize( self :List[str] , image : np.ndarray , size : Dict[str, int] , resample : PILImageResampling = PILImageResampling.BILINEAR , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs :Dict , ) -> np.ndarray:
        '''simple docstring'''
        size = get_size_dict(size )
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}" )
        output_size = (size["""height"""], size["""width"""])
        return resize(image , size=output_size , resample=resample , data_format=data_format , **kwargs )
    def preprocess( self :Dict , images : ImageInput , do_resize : bool = None , size : Dict[str, int] = None , resample : PILImageResampling = None , apply_ocr : bool = None , ocr_lang : Optional[str] = None , tesseract_config : Optional[str] = None , return_tensors : Optional[Union[str, TensorType]] = None , data_format : ChannelDimension = ChannelDimension.FIRST , **kwargs :Optional[Any] , ) -> PIL.Image.Image:
        '''simple docstring'''
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size )
        resample = resample if resample is not None else self.resample
        apply_ocr = apply_ocr if apply_ocr is not None else self.apply_ocr
        ocr_lang = ocr_lang if ocr_lang is not None else self.ocr_lang
        tesseract_config = tesseract_config if tesseract_config is not None else self.tesseract_config
        images = make_list_of_images(images )
        if not valid_images(images ):
            raise ValueError(
                """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
                """torch.Tensor, tf.Tensor or jax.ndarray.""" )
        if do_resize and size is None:
            raise ValueError("""Size must be specified if do_resize is True.""" )
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        if apply_ocr:
            requires_backends(self , """pytesseract""" )
            words_batch = []
            boxes_batch = []
            for image in images:
                words , boxes = apply_tesseract(image , ocr_lang , tesseract_config )
                words_batch.append(words )
                boxes_batch.append(boxes )
        if do_resize:
            images = [self.resize(image=image , size=size , resample=resample ) for image in images]
        # flip color channels from RGB to BGR (as Detectron2 requires this)
        images = [flip_channel_order(image ) for image in images]
        images = [to_channel_dimension_format(image , data_format ) for image in images]
        data = BatchFeature(data={"""pixel_values""": images} , tensor_type=return_tensors )
        if apply_ocr:
            data["""words"""] = words_batch
            data["""boxes"""] = boxes_batch
        return data
| 276 | 1 |
from __future__ import annotations
from numpy import array, cos, cross, floataa, radians, sin
from numpy.typing import NDArray
def polar_force( magnitude , angle , radian_mode = False ):
    '''simple docstring'''
    if radian_mode:
        return [magnitude * cos(angle ), magnitude * sin(angle )]
    return [magnitude * cos(radians(angle ) ), magnitude * sin(radians(angle ) )]
def in_static_equilibrium( forces , location , eps = 10**-1 ):
    '''simple docstring'''
    moments = cross(location , forces )
    sum_moments = sum(moments )
    return abs(sum_moments ) < eps
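# Editorial note (worked check with made-up numbers): polar_force(10, 90)
# decomposes a 10 N force at 90 degrees into roughly [0.0, 10.0]; summing the
# moments r x F of all force components and testing |sum| < eps is exactly what
# in_static_equilibrium() does in the self-tests below.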
if __name__ == "__main__":
# Test to check if it works
forces = array(
[
polar_force(718.4, 180 - 30),
polar_force(879.54, 45),
polar_force(100, -90),
]
)
location = array([[0, 0], [0, 0], [0, 0]])
assert in_static_equilibrium(forces, location)
# Problem 1 in image_data/2D_problems.jpg
forces = array(
[
polar_force(30 * 9.81, 15),
polar_force(215, 180 - 45),
polar_force(264, 90 - 30),
]
)
location = array([[0, 0], [0, 0], [0, 0]])
assert in_static_equilibrium(forces, location)
# Problem in image_data/2D_problems_1.jpg
forces = array([[0, -2000], [0, -1200], [0, 15600], [0, -12400]])
location = array([[0, 0], [6, 0], [10, 0], [12, 0]])
assert in_static_equilibrium(forces, location)
import doctest
doctest.testmod()
| 364 |
import unittest
from transformers import TrOCRConfig
from transformers.testing_utils import is_torch_available, require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM
@require_torch
class TrOCRStandaloneDecoderModelTester :
    def __init__( self ,parent ,vocab_size=99 ,batch_size=13 ,d_model=16 ,decoder_seq_length=7 ,is_training=True ,is_decoder=True ,use_attention_mask=True ,use_cache=False ,use_labels=True ,decoder_start_token_id=2 ,decoder_ffn_dim=32 ,decoder_layers=4 ,decoder_attention_heads=4 ,max_position_embeddings=30 ,pad_token_id=0 ,bos_token_id=1 ,eos_token_id=2 ,scope=None ,):
        self.parent = parent
        self.batch_size = batch_size
        self.decoder_seq_length = decoder_seq_length
        # For common tests
        self.seq_length = self.decoder_seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.hidden_size = d_model
        self.decoder_layers = decoder_layers
        self.num_hidden_layers = decoder_layers
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_attention_heads = decoder_attention_heads
        self.num_attention_heads = decoder_attention_heads
        self.eos_token_id = eos_token_id
        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.decoder_start_token_id = decoder_start_token_id
        self.use_cache = use_cache
        self.max_position_embeddings = max_position_embeddings
        self.scope = None
        self.decoder_key_length = decoder_seq_length
        self.base_model_out_len = 2
        self.decoder_attention_idx = 1
    def prepare_config_and_inputs( self):
        input_ids = ids_tensor([self.batch_size, self.decoder_seq_length] ,self.vocab_size)
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = ids_tensor([self.batch_size, self.decoder_seq_length] ,vocab_size=2)
        lm_labels = None
        if self.use_labels:
            lm_labels = ids_tensor([self.batch_size, self.decoder_seq_length] ,self.vocab_size)
        config = TrOCRConfig(
            vocab_size=self.vocab_size ,d_model=self.d_model ,decoder_layers=self.decoder_layers ,decoder_ffn_dim=self.decoder_ffn_dim ,decoder_attention_heads=self.decoder_attention_heads ,eos_token_id=self.eos_token_id ,bos_token_id=self.bos_token_id ,use_cache=self.use_cache ,pad_token_id=self.pad_token_id ,decoder_start_token_id=self.decoder_start_token_id ,max_position_embeddings=self.max_position_embeddings ,)
        return (config, input_ids, attention_mask, lm_labels)
    def create_and_check_decoder_model_past( self ,config ,input_ids ,attention_mask ,lm_labels ,):
        config.use_cache = True
        model = TrOCRDecoder(config=config).to(torch_device).eval()
        input_ids = input_ids[:2]
        input_ids[input_ids == 0] += 1
        # first forward pass
        outputs = model(input_ids ,use_cache=True)
        outputs_use_cache_conf = model(input_ids)
        outputs_no_past = model(input_ids ,use_cache=False)
        self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
        self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)
        past_key_values = outputs['''past_key_values''']
        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((2, 1) ,config.vocab_size - 1) + 1
        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens] ,dim=-1)
        output_from_no_past = model(next_input_ids)['''last_hidden_state''']
        output_from_past = model(next_tokens ,past_key_values=past_key_values)['''last_hidden_state''']
        # select random slice
        random_slice_idx = ids_tensor((1,) ,output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()
        # test that outputs are equal for slice
        assert torch.allclose(output_from_past_slice ,output_from_no_past_slice ,atol=1E-3)
    def prepare_config_and_inputs_for_common( self):
        config_and_inputs = self.prepare_config_and_inputs()
        config , input_ids , attention_mask , lm_labels = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''attention_mask''': attention_mask}
        return config, inputs_dict
@require_torch
class TrOCRStandaloneDecoderModelTest ( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else ()
    all_generative_model_classes = (TrOCRForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = {'''text-generation''': TrOCRForCausalLM} if is_torch_available() else {}
    fx_compatible = True
    test_pruning = False
    def setUp( self):
        self.model_tester = TrOCRStandaloneDecoderModelTester(self ,is_training=False)
        self.config_tester = ConfigTester(self ,config_class=TrOCRConfig)
def A__ ( self):
pass
def A__ ( self):
pass
def A__ ( self):
pass
def A__ ( self):
self.config_tester.run_common_tests()
def A__ ( self):
lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past(*A__)
def A__ ( self):
return
@unittest.skip('''The model doesn\'t support left padding''') # and it's not used enough to be worth fixing :)
def A__ ( self):
pass
| 97 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
A : str = logging.get_logger(__name__)
A : Union[str, Any] = {
"microsoft/swin-tiny-patch4-window7-224": (
"https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json"
),
# See all Swin models at https://huggingface.co/models?filter=swin
}
class SwinConfig ( BackboneConfigMixin ,PretrainedConfig ):
    '''simple docstring'''
    model_type = """swin"""
    attribute_map = {
        """num_attention_heads""": """num_heads""",
        """num_hidden_layers""": """num_layers""",
    }
    def __init__( self , image_size=224 , patch_size=4 , num_channels=3 , embed_dim=96 , depths=[2, 2, 6, 2] , num_heads=[3, 6, 12, 24] , window_size=7 , mlp_ratio=4.0 , qkv_bias=True , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , drop_path_rate=0.1 , hidden_act="gelu" , use_absolute_embeddings=False , initializer_range=0.02 , layer_norm_eps=1e-5 , encoder_stride=32 , out_features=None , out_indices=None , **kwargs , ):
        super().__init__(**kwargs )
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths )
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths ) - 1) )
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1 , len(depths ) + 1 )]
        self._out_features , self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features , out_indices=out_indices , stage_names=self.stage_names )
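# Editorial note: with the defaults above (embed_dim=96, four stages), the
# derived hidden_size is 96 * 2**3 == 768 -- the channel width after the last
# stage, which is what VisionEncoderDecoderModel reads from this config.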
class SwinOnnxConfig ( OnnxConfig ):
    '''simple docstring'''
    torch_onnx_minimum_version = version.parse("""1.11""" )
    @property
    def inputs( self ):
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ] )
    @property
    def atol_for_validation( self ):
        return 1e-4
| 57 |
'''simple docstring'''
alphabet_size = 256
# Modulus to hash a string
modulus = 1_000_003
def rabin_karp( pattern : str , text : str ) -> bool:
    """simple docstring"""
    p_len = len(pattern )
    t_len = len(text )
    if p_len > t_len:
        return False
    p_hash = 0
    text_hash = 0
    modulus_power = 1
    # Calculating the hash of pattern and substring of text
    for i in range(p_len ):
        p_hash = (ord(pattern[i] ) + p_hash * alphabet_size) % modulus
        text_hash = (ord(text[i] ) + text_hash * alphabet_size) % modulus
        if i == p_len - 1:
            continue
        modulus_power = (modulus_power * alphabet_size) % modulus
    for i in range(0 , t_len - p_len + 1 ):
        if text_hash == p_hash and text[i : i + p_len] == pattern:
            return True
        if i == t_len - p_len:
            continue
        # Calculate the https://en.wikipedia.org/wiki/Rolling_hash
        text_hash = (
            (text_hash - ord(text[i] ) * modulus_power) * alphabet_size
            + ord(text[i + p_len] )
        ) % modulus
    return False
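# Editorial illustration (added): with alphabet_size == 256, hashing "ab" gives
# (ord('a') * 256 + ord('b')) % modulus; sliding the window to "bc" subtracts
# ord('a') * modulus_power, multiplies by 256 and adds ord('c') -- an O(1)
# update instead of rehashing the whole window, which is the point of Rabin-Karp.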
def test_rabin_karp( ) -> None:
    """simple docstring"""
    # Test 1)
    pattern = 'abc1abc12'
    text1 = 'alskfjaldsabc1abc1abc12k23adsfabcabc'
    text2 = 'alskfjaldsk23adsfabcabc'
    assert rabin_karp(pattern , text1 ) and not rabin_karp(pattern , text2 )
    # Test 2)
    pattern = 'ABABX'
    text = 'ABABZABABYABABX'
    assert rabin_karp(pattern , text )
    # Test 3)
    pattern = 'AAAB'
    text = 'ABAAAAAB'
    assert rabin_karp(pattern , text )
    # Test 4)
    pattern = 'abcdabcy'
    text = 'abcxabcdabxabcdabcdabcy'
    assert rabin_karp(pattern , text )
    # Test 5)
    pattern = 'Lü'
    text = 'Lüsai'
    assert rabin_karp(pattern , text )
    pattern = 'Lue'
    assert not rabin_karp(pattern , text )
    print('Success.' )
if __name__ == "__main__":
test_rabin_karp()
| 53 | 0 |
import dataclasses
import json
import warnings
from dataclasses import dataclass, field
from time import time
from typing import List
from ..utils import logging
logger = logging.get_logger(__name__)
def list_field( default=None , metadata=None ) -> Optional[int]:
    return field(default_factory=lambda: default , metadata=metadata )
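# Editorial note: list_field wraps a mutable default in a default_factory so
# dataclasses accepts it (plain mutable defaults are rejected), e.g.
#   batch_sizes: List[int] = list_field(default=[8], metadata={"help": "..."})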
@dataclass
class A_ :
_UpperCAmelCase : List[str] = list_field(
default=[] , metadata={
'''help''': (
'''Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version'''
''' of all available models'''
)
} , )
_UpperCAmelCase : List[int] = list_field(
default=[8] , metadata={'''help''': '''List of batch sizes for which memory and time performance will be evaluated'''} )
_UpperCAmelCase : List[int] = list_field(
default=[8, 32, 128, 512] , metadata={'''help''': '''List of sequence lengths for which memory and time performance will be evaluated'''} , )
_UpperCAmelCase : bool = field(
default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''Whether to benchmark inference of model. Inference can be disabled via --no-inference.'''} , )
_UpperCAmelCase : bool = field(
default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''Whether to run on available cuda devices. Cuda can be disabled via --no-cuda.'''} , )
_UpperCAmelCase : bool = field(
default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''Whether to run on available tpu devices. TPU can be disabled via --no-tpu.'''} )
_UpperCAmelCase : bool = field(default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''Use FP16 to accelerate inference.'''} )
_UpperCAmelCase : bool = field(default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''Benchmark training of model'''} )
_UpperCAmelCase : bool = field(default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''Verbose memory tracing'''} )
_UpperCAmelCase : bool = field(
default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''Whether to perform speed measurements. Speed measurements can be disabled via --no-speed.'''} , )
_UpperCAmelCase : bool = field(
default=SCREAMING_SNAKE_CASE , metadata={
'''help''': '''Whether to perform memory measurements. Memory measurements can be disabled via --no-memory'''
} , )
_UpperCAmelCase : bool = field(default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''Trace memory line by line'''} )
_UpperCAmelCase : bool = field(default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''Save result to a CSV file'''} )
_UpperCAmelCase : bool = field(default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''Save all print statements in a log file'''} )
_UpperCAmelCase : bool = field(default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''Whether to print environment information'''} )
_UpperCAmelCase : bool = field(
default=SCREAMING_SNAKE_CASE , metadata={
'''help''': (
'''Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use'''
''' multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled'''
''' for debugging / testing and on TPU.'''
)
} , )
_UpperCAmelCase : str = field(
default=f'''inference_time_{round(time() )}.csv''' , metadata={'''help''': '''CSV filename used if saving time results to csv.'''} , )
_UpperCAmelCase : str = field(
default=f'''inference_memory_{round(time() )}.csv''' , metadata={'''help''': '''CSV filename used if saving memory results to csv.'''} , )
_UpperCAmelCase : str = field(
default=f'''train_time_{round(time() )}.csv''' , metadata={'''help''': '''CSV filename used if saving time results to csv for training.'''} , )
_UpperCAmelCase : str = field(
default=f'''train_memory_{round(time() )}.csv''' , metadata={'''help''': '''CSV filename used if saving memory results to csv for training.'''} , )
_UpperCAmelCase : str = field(
default=f'''env_info_{round(time() )}.csv''' , metadata={'''help''': '''CSV filename used if saving environment information.'''} , )
_UpperCAmelCase : str = field(
default=f'''log_{round(time() )}.csv''' , metadata={'''help''': '''Log filename used if print statements are saved in log.'''} , )
_UpperCAmelCase : int = field(default=3 , metadata={'''help''': '''Times an experiment will be run.'''} )
_UpperCAmelCase : bool = field(
default=SCREAMING_SNAKE_CASE , metadata={
'''help''': (
'''Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain'''
''' model weights.'''
)
} , )
def lowerCAmelCase ( self : Tuple):
warnings.warn(
F"The class {self.__class__} is deprecated. Hugging Face Benchmarking utils"
' are deprecated in general and it is advised to use external Benchmarking libraries '
' to benchmark Transformer models.' , FutureWarning , )
def lowerCAmelCase ( self : Dict):
return json.dumps(dataclasses.asdict(self) ,indent=2)
@property
def lowerCAmelCase ( self : Optional[Any]):
if len(self.models) <= 0:
raise ValueError(
'Please make sure you provide at least one model name / model identifier, *e.g.* `--models'
' bert-base-cased` or `args.models = [\'bert-base-cased\'].')
return self.models
@property
def lowerCAmelCase ( self : Any):
if not self.multi_process:
return False
elif self.is_tpu:
logger.info('Multiprocessing is currently not possible on TPU.')
return False
else:
return True
| 351 |
import warnings
from pathlib import Path
from typing import List, Tuple, Union
import fire
from torch import nn
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer, PreTrainedModel
from transformers.utils import logging
logger = logging.get_logger(__name__)
def copy_layers( src_layers : nn.ModuleList , dest_layers : nn.ModuleList , layers_to_copy : List[int] ) -> None:
    layers_to_copy = nn.ModuleList([src_layers[i] for i in layers_to_copy] )
    assert len(dest_layers ) == len(layers_to_copy ), F"{len(dest_layers )} != {len(layers_to_copy )}"
    dest_layers.load_state_dict(layers_to_copy.state_dict() )
LAYERS_TO_COPY = {
# maps num layers in teacher -> num_layers in student -> which teacher layers to copy.
# 12: bart, 16: pegasus, 6: marian/Helsinki-NLP
12: {
1: [0], # This says that if the teacher has 12 layers and the student has 1, copy layer 0 of the teacher
2: [0, 6],
3: [0, 6, 11],
4: [0, 4, 8, 11],
6: [0, 2, 4, 7, 9, 11],
9: [0, 1, 2, 4, 5, 7, 9, 10, 11],
12: list(range(12)),
},
16: { # maps num layers in student -> which teacher layers to copy
1: [0],
2: [0, 15],
3: [0, 8, 15],
4: [0, 5, 10, 15],
6: [0, 3, 6, 9, 12, 15],
8: [0, 2, 4, 6, 8, 10, 12, 15],
9: [0, 1, 3, 5, 7, 9, 11, 13, 15],
12: [0, 1, 2, 3, 4, 5, 6, 7, 9, 11, 13, 15],
16: list(range(16)),
},
6: {1: [0], 2: [0, 5], 3: [0, 2, 5], 4: [0, 1, 3, 5], 6: list(range(6))},
}
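# Editorial example: distilling a 12-layer teacher down to a 3-layer student
# copies teacher layers [0, 6, 11] -- first, middle and last -- per the table above.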
LAYERS_TO_SUPERVISE = {
# maps num layers in student -> which teacher layers to copy.
6: {1: [5], 2: [3, 5], 3: [1, 4, 5], 4: [1, 2, 4, 5]},
12: {1: [11], 2: [5, 11], 3: [3, 7, 11], 6: [1, 3, 5, 8, 10, 11]},
16: {1: [15], 4: [4, 9, 12, 15], 8: [1, 3, 5, 7, 9, 11, 13, 15]},
}
def pick_layers_to_copy( n_student , n_teacher ) -> List[int]:
    try:
        val = LAYERS_TO_COPY[n_teacher][n_student]
        return val
    except KeyError:
        if n_student != n_teacher:
            warnings.warn(
                F"no hardcoded layers to copy for teacher {n_teacher} -> student {n_student}, defaulting to first"
                F" {n_student}" )
        return list(range(n_student ) )
def get_layers_to_supervise( n_student , n_teacher ) -> List[int]:
    if n_student > n_teacher:
        raise ValueError(F"Cannot perform intermediate supervision for student {n_student} > teacher {n_teacher}" )
    elif n_teacher == n_student:
        return list(range(n_teacher ) )
    elif n_student == 1:
        return [n_teacher - 1]
    else:
        return LAYERS_TO_SUPERVISE[n_teacher][n_student]
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ = "student" , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__=False , lowerCamelCase__=None , lowerCamelCase__=None , **lowerCamelCase__ , ) -> Tuple[PreTrainedModel, List[int], List[int]]:
__lowerCamelCase : int = 'encoder_layers and decoder_layers cannot be both None-- you would just have an identical teacher.'
assert (e is not None) or (d is not None), _msg
if isinstance(lowerCamelCase__ , lowerCamelCase__ ):
AutoTokenizer.from_pretrained(lowerCamelCase__ ).save_pretrained(lowerCamelCase__ ) # purely for convenience
__lowerCamelCase : Union[str, Any] = AutoModelForSeqaSeqLM.from_pretrained(lowerCamelCase__ ).eval()
else:
assert isinstance(lowerCamelCase__ , lowerCamelCase__ ), F"teacher must be a model or string got type {type(lowerCamelCase__ )}"
__lowerCamelCase : str = teacher.config.to_diff_dict()
try:
__lowerCamelCase , __lowerCamelCase : Dict = teacher.config.encoder_layers, teacher.config.decoder_layers
if e is None:
__lowerCamelCase : Optional[int] = teacher_e
if d is None:
__lowerCamelCase : Optional[Any] = teacher_d
init_kwargs.update({'encoder_layers': e, 'decoder_layers': d} )
except AttributeError: # T5
if hasattr(teacher.config , 'num_encoder_layers' ):
__lowerCamelCase , __lowerCamelCase : int = teacher.config.num_encoder_layers, teacher.config.num_decoder_layers
else:
__lowerCamelCase , __lowerCamelCase : Any = teacher.config.num_layers, teacher.config.num_decoder_layers
if e is None:
__lowerCamelCase : Union[str, Any] = teacher_e
if d is None:
__lowerCamelCase : Any = teacher_d
if hasattr(teacher.config , 'num_encoder_layers' ):
init_kwargs.update({'num_encoder_layers': e, 'num_decoder_layers': d} )
else:
init_kwargs.update({'num_layers': e, 'num_decoder_layers': d} )
# Kwargs to instantiate student: teacher kwargs with updated layer numbers + **extra_config_kwargs
init_kwargs.update(lowerCamelCase__ )
# Copy weights
__lowerCamelCase : str = teacher.config_class(**lowerCamelCase__ )
__lowerCamelCase : Union[str, Any] = AutoModelForSeqaSeqLM.from_config(lowerCamelCase__ )
# Start by copying the full teacher state dict this will copy the first N teacher layers to the student.
__lowerCamelCase : Tuple = student.load_state_dict(teacher.state_dict() , strict=lowerCamelCase__ )
assert info.missing_keys == [], info.missing_keys # every student key should have a teacher keys.
if copy_first_teacher_layers: # Our copying is done. We just log and save
__lowerCamelCase , __lowerCamelCase : Optional[Any] = list(range(lowerCamelCase__ ) ), list(range(lowerCamelCase__ ) )
logger.info(
F"Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to"
F" {save_path}" )
student.save_pretrained(lowerCamelCase__ )
return student, e_layers_to_copy, d_layers_to_copy
# Decide which layers of the teacher to copy. Not exactly alternating -- we try to keep first and last layer.
if e_layers_to_copy is None:
__lowerCamelCase : List[int] = pick_layers_to_copy(lowerCamelCase__ , lowerCamelCase__ )
if d_layers_to_copy is None:
__lowerCamelCase : List[int] = pick_layers_to_copy(lowerCamelCase__ , lowerCamelCase__ )
try:
if hasattr(
lowerCamelCase__ , 'prophetnet' ): # For ProphetNet, student.model.encoder.layers is called student.prophetnet.encoder.layers
copy_layers(teacher.prophetnet.encoder.layers , student.prophetnet.encoder.layers , lowerCamelCase__ )
copy_layers(teacher.prophetnet.decoder.layers , student.prophetnet.decoder.layers , lowerCamelCase__ )
else:
copy_layers(teacher.model.encoder.layers , student.model.encoder.layers , lowerCamelCase__ )
copy_layers(teacher.model.decoder.layers , student.model.decoder.layers , lowerCamelCase__ )
except AttributeError: # For t5, student.model.encoder.layers is called student.encoder.block
copy_layers(teacher.encoder.block , student.encoder.block , lowerCamelCase__ )
copy_layers(teacher.decoder.block , student.decoder.block , lowerCamelCase__ )
logger.info(
F"Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to {save_path}" )
__lowerCamelCase : Dict = {
'teacher_type': teacher.config.model_type,
'copied_encoder_layers': e_layers_to_copy,
'copied_decoder_layers': d_layers_to_copy,
}
student.save_pretrained(lowerCamelCase__ )
# Save information about copying for easier reproducibility
return student, e_layers_to_copy, d_layers_to_copy
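# Editorial addition: hedged usage sketch. Called directly (instead of through
# fire below), building a 12-encoder/3-decoder student from a BART teacher
# looks like this; the checkpoint name is illustrative only:
#   student, e_copied, d_copied = create_student_by_copying_alternating_layers(
#       "facebook/bart-large-cnn", save_path="student_dir", e=12, d=3)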
if __name__ == "__main__":
fire.Fire(create_student_by_copying_alternating_layers)
| 113 | 0 |
'''simple docstring'''
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class OpenLlamaModelTester :
"""simple docstring"""
    def __init__( self : Tuple , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=False , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs( self : Union[str, Any] ):
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config( self : Optional[int] ):
"""simple docstring"""
return OpenLlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCamelCase__ , initializer_range=self.initializer_range , use_stable_embedding=UpperCamelCase__ , )
    def create_and_check_model( self : Tuple , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        """simple docstring"""
        model = OpenLlamaModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_model_as_decoder( self : List[Any] , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , encoder_hidden_states , encoder_attention_mask , ):
        """simple docstring"""
        config.add_cross_attention = True
        model = OpenLlamaModel(config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_attention_mask , )
        result = model(
            input_ids , attention_mask=input_mask , encoder_hidden_states=encoder_hidden_states , )
        result = model(input_ids , attention_mask=input_mask )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_causal_lm( self : List[Any] , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , encoder_hidden_states , encoder_attention_mask , ):
        """simple docstring"""
        model = OpenLlamaForCausalLM(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_decoder_model_past_large_inputs( self : int , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , encoder_hidden_states , encoder_attention_mask , ):
        """simple docstring"""
        config.is_decoder = True
        config.add_cross_attention = True
        model = OpenLlamaForCausalLM(config=config )
        model.to(torch_device )
        model.eval()
        # first forward pass
        outputs = model(
            input_ids , attention_mask=input_mask , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_attention_mask , use_cache=True , )
        past_key_values = outputs.past_key_values
        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3) , config.vocab_size )
        next_mask = ids_tensor((self.batch_size, 3) , vocab_size=2 )
        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens] , dim=-1 )
        next_attention_mask = torch.cat([input_mask, next_mask] , dim=-1 )
        output_from_no_past = model(
            next_input_ids , attention_mask=next_attention_mask , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_attention_mask , output_hidden_states=True , )['hidden_states'][0]
        output_from_past = model(
            next_tokens , attention_mask=next_attention_mask , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_attention_mask , past_key_values=past_key_values , output_hidden_states=True , )['hidden_states'][0]
        # select random slice
        random_slice_idx = ids_tensor((1,) , output_from_past.shape[-1] ).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice , output_from_no_past_slice , atol=1E-3 ) )
    def prepare_config_and_inputs_for_common( self : Any ):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class OpenLlamaModelTest ( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """simple docstring"""
    all_model_classes = (
        (OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
    )
    all_generative_model_classes = (OpenLlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": OpenLlamaModel,
            "text-classification": OpenLlamaForSequenceClassification,
            "text-generation": OpenLlamaForCausalLM,
            "zero-shot": OpenLlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False
    def setUp( self : str ):
        """simple docstring"""
        self.model_tester = OpenLlamaModelTester(self )
        self.config_tester = ConfigTester(self , config_class=OpenLlamaConfig , hidden_size=37 )
    def test_config( self : int ):
        """simple docstring"""
        self.config_tester.run_common_tests()
    def test_model( self : Dict ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_model_various_embeddings( self : Optional[Any] ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs )
    def test_open_llama_sequence_classification_model( self : int ):
        """simple docstring"""
        config , input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict['input_ids']
        attention_mask = input_ids.ne(1 ).to(torch_device )
        sequence_labels = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
        model = OpenLlamaForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=attention_mask , labels=sequence_labels )
        self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
    def test_open_llama_sequence_classification_model_for_single_label( self : List[Any] ):
        """simple docstring"""
        config , input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = 'single_label_classification'
        input_ids = input_dict['input_ids']
        attention_mask = input_ids.ne(1 ).to(torch_device )
        sequence_labels = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
        model = OpenLlamaForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=attention_mask , labels=sequence_labels )
        self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
    def test_open_llama_sequence_classification_model_for_multi_label( self : Optional[int] ):
        """simple docstring"""
        config , input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = 'multi_label_classification'
        input_ids = input_dict['input_ids']
        attention_mask = input_ids.ne(1 ).to(torch_device )
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
        model = OpenLlamaForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=attention_mask , labels=sequence_labels )
        self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip('Open-Llama buffers include complex numbers, which breaks this test' )
def A ( self : Tuple ):
"""simple docstring"""
pass
@parameterized.expand([('linear',), ('dynamic',)] )
    def test_model_rope_scaling( self : Union[str, Any] , scaling_type : Any ):
        """simple docstring"""
        config , _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10] , config.vocab_size )
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
        set_seed(42 )  # Fixed seed at init time so the two models get the same random weights
        original_model = OpenLlamaModel(config )
        original_model.to(torch_device )
        original_model.eval()
        original_short_output = original_model(short_input ).last_hidden_state
        original_long_output = original_model(long_input ).last_hidden_state
        set_seed(42 )  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {'type': scaling_type, 'factor': 10.0}
        scaled_model = OpenLlamaModel(config )
        scaled_model.to(torch_device )
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input ).last_hidden_state
        scaled_long_output = scaled_model(long_input ).last_hidden_state
        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output , scaled_short_output , atol=1E-5 ) )
        else:
            self.assertFalse(torch.allclose(original_short_output , scaled_short_output , atol=1E-5 ) )
        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output , scaled_long_output , atol=1E-5 ) )
| 28 |
'''simple docstring'''
__snake_case : Tuple = '\n# How to install Transformers\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'
__snake_case : List[str] = [{'type': 'code', 'content': INSTALL_CONTENT}]
__snake_case : Any = {
'{processor_class}': 'FakeProcessorClass',
'{model_class}': 'FakeModelClass',
'{object_class}': 'FakeObjectClass',
}
| 134 | 0 |
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class TreeNode :
    """simple docstring"""
    data : int
    left : TreeNode | None = None
    right : TreeNode | None = None
__A = namedtuple('''CoinsDistribResult''', '''moves excess''')
def distribute_coins( root : TreeNode | None ) -> int:
    '''simple docstring'''
    if root is None:
        return 0
    # Validation
    def count_nodes( node : TreeNode | None ) -> int:
        if node is None:
            return 0
        return count_nodes(node.left ) + count_nodes(node.right ) + 1
    def count_coins( node : TreeNode | None ) -> int:
        if node is None:
            return 0
        return count_coins(node.left ) + count_coins(node.right ) + node.data
    if count_nodes(root ) != count_coins(root ):
        raise ValueError("""The nodes number should be same as the number of coins""" )
    # Main calculation
    def get_distrib( node : TreeNode | None ) -> CoinsDistribResult:
        if node is None:
            return CoinsDistribResult(0 , 1 )
        left_distrib_moves , left_distrib_excess = get_distrib(node.left )
        right_distrib_moves , right_distrib_excess = get_distrib(node.right )
        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess
        coins_to_move = (
            left_distrib_moves
            + right_distrib_moves
            + abs(coins_to_left )
            + abs(coins_to_right )
        )
        excess = node.data - coins_to_left - coins_to_right
        return CoinsDistribResult(coins_to_move , excess )
    return get_distrib(root )[0]
if __name__ == "__main__":
import doctest
doctest.testmod()
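    # Editorial addition (hedged example): a root holding 3 coins with two empty
    # children needs exactly 2 moves -- one coin pushed down to each child.
    demo_root = TreeNode(3 , TreeNode(0 ) , TreeNode(0 ) )
    assert distribute_coins(demo_root ) == 2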
| 362 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'''configuration_clip''': [
'''CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''CLIPConfig''',
'''CLIPOnnxConfig''',
'''CLIPTextConfig''',
'''CLIPVisionConfig''',
],
'''processing_clip''': ['''CLIPProcessor'''],
'''tokenization_clip''': ['''CLIPTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_clip_fast'''] = ['''CLIPTokenizerFast''']
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''feature_extraction_clip'''] = ['''CLIPFeatureExtractor''']
    _import_structure['''image_processing_clip'''] = ['''CLIPImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_clip'''] = [
'''CLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CLIPModel''',
'''CLIPPreTrainedModel''',
'''CLIPTextModel''',
'''CLIPTextModelWithProjection''',
'''CLIPVisionModel''',
'''CLIPVisionModelWithProjection''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_clip'''] = [
'''TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFCLIPModel''',
'''TFCLIPPreTrainedModel''',
'''TFCLIPTextModel''',
'''TFCLIPVisionModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_flax_clip'''] = [
'''FlaxCLIPModel''',
'''FlaxCLIPPreTrainedModel''',
'''FlaxCLIPTextModel''',
'''FlaxCLIPTextPreTrainedModel''',
'''FlaxCLIPVisionModel''',
'''FlaxCLIPVisionPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
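# Editorial note: with the lazy structure above, `from transformers import
# CLIPModel` resolves the attribute through _LazyModule and only imports the
# heavy torch-backed submodule on first access; until then only the entries
# registered in _import_structure are known to the package.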
| 277 | 0 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mobilebert import MobileBertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {'mobilebert-uncased': 'https://huggingface.co/google/mobilebert-uncased/resolve/main/vocab.txt'},
    'tokenizer_file': {
        'mobilebert-uncased': 'https://huggingface.co/google/mobilebert-uncased/resolve/main/tokenizer.json'
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {'mobilebert-uncased': 512}
PRETRAINED_INIT_CONFIGURATION = {}
class MobileBertTokenizerFast (PreTrainedTokenizerFast ):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = MobileBertTokenizer
    def __init__(self : Optional[int] , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , tokenize_chinese_chars=True , strip_accents=None , **kwargs , ):
        '''simple docstring'''
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get('''lowercase''' , do_lower_case ) != do_lower_case
            or normalizer_state.get('''strip_accents''' , strip_accents ) != strip_accents
            or normalizer_state.get('''handle_chinese_chars''' , tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers , normalizer_state.pop('''type''' ) )
            normalizer_state['''lowercase'''] = do_lower_case
            normalizer_state['''strip_accents'''] = strip_accents
            normalizer_state['''handle_chinese_chars'''] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens (self : str , token_ids_0 , token_ids_1=None ):
        '''simple docstring'''
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1 is not None:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences (self : Any , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None ):
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
    def save_vocabulary (self : int , save_directory : str , filename_prefix : Optional[str] = None ):
        '''simple docstring'''
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
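if __name__ == "__main__":
    # Editorial addition: hedged usage sketch; it downloads the "mobilebert-uncased"
    # checkpoint referenced above, so it needs network access.
    tok = MobileBertTokenizerFast.from_pretrained("google/mobilebert-uncased")
    enc = tok("hello world" , "second segment" )
    print(enc["input_ids"] , enc["token_type_ids"] )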
| 2 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
UniSpeechConfig,
UniSpeechForCTC,
UniSpeechForPreTraining,
WavaVecaFeatureExtractor,
WavaVecaPhonemeCTCTokenizer,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "ctc_proj",
"mask_emb": "masked_spec_embed",
}
TOP_LEVEL_KEYS = [
"ctc_proj",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type, is_finetuned):
    """Walk `hf_pointer` along the dotted `key` and copy `value` into the matching tensor."""
    for attribute in key.split("."):
        if is_finetuned:
            if attribute in ["quantizer", "project_q", "project_hid"]:
                # those layers are only relevant for pretraining and should be dropped
                return
            if attribute == "ctc_proj":
                # we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models
                attribute = "lm_head"
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    """Map every fairseq tensor onto the HF UniSpeech model, tracking unused weights."""
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.unispeech.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "unispeech." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type, is_finetuned)
                continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(f"Unused weights: {unused_weights}")
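# A small illustration (not in the original script) of the "*" substitution used above:
# the fairseq name "encoder.layers.3.fc1.weight" matched against key "fc1" yields layer
# index "3", which fills the "*" slot of the mapped template key.
def _sketch_expand_wildcard(name, key, mapped_key):
    layer_index = name.split(key)[0].split(".")[-2]
    return mapped_key.replace("*", layer_index)


assert (
    _sketch_expand_wildcard("encoder.layers.3.fc1.weight", "fc1", "encoder.layers.*.feed_forward.intermediate_dense")
    == "encoder.layers.3.feed_forward.intermediate_dense"
)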
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """Copy one fairseq feature-extractor conv/layer-norm tensor into the HF model."""
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_unispeech_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True):
    """Copy/paste/tweak a fairseq UniSpeech checkpoint into the transformers design."""
    if config_path is not None:
        config = UniSpeechConfig.from_pretrained(config_path)
    else:
        config = UniSpeechConfig()
    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load_from_json(dict_path)
            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices
            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 42
            vocab_dict["<s>"] = 43
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = Wav2Vec2PhonemeCTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1,
                sampling_rate=16000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)
        hf_unispeech = UniSpeechForCTC(config)
    else:
        hf_unispeech = UniSpeechForPreTraining(config)
    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1]), "w2v_path": checkpoint_path}
        )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])
    model = model[0].eval()
    recursively_load_weights(model, hf_unispeech, is_finetuned)
    hf_unispeech.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
    )
    args = parser.parse_args()
    convert_unispeech_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
    )
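# Example invocation (script file name and paths are placeholders, not from the original):
#   python convert_unispeech_checkpoint.py \
#       --checkpoint_path ./unispeech.pt \
#       --pytorch_dump_folder_path ./unispeech-hf \
#       --dict_path ./dict.json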
| 10 | 0 |
'''simple docstring'''
import argparse
import collections
import os
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_table.py
TRANSFORMERS_PATH = "src/transformers"
PATH_TO_DOCS = "docs/source/en"
REPO_PATH = "."
def _find_text_in_file(filename, start_prompt, end_prompt):
    """Return the text between `start_prompt` and `end_prompt` in `filename`, plus its bounds."""
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt):
        start_index += 1
    start_index += 1
    end_index = start_index
    while not lines[end_index].startswith(end_prompt):
        end_index += 1
    end_index -= 1
    while len(lines[start_index]) <= 1:
        start_index += 1
    while len(lines[end_index]) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index]), start_index, end_index, lines
# Add here suffixes that are used to identify models, separated by |
# (constant name below is an assumption; the original placeholder was obfuscated)
MODEL_SUFFIXES = "Model|Encoder|Decoder|ForConditionalGeneration"
# Regexes that match TF/Flax/PT model names.
_re_tf_models = re.compile(r"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
_re_flax_models = re.compile(r"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Will match any TF or Flax model too so need to be in an else branch after the two previous regexes.
_re_pt_models = re.compile(r"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)
def camel_case_split(identifier):
    """Split a camel-cased `identifier` into words."""
    matches = re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)", identifier)
    return [m.group(0) for m in matches]


def _center_text(text, width):
    """Center `text` in a cell of `width` characters (the emoji marks count as two)."""
    text_length = 2 if text == "✅" or text == "❌" else len(text)
    left_indent = (width - text_length) // 2
    right_indent = width - text_length - left_indent
    return " " * left_indent + text + " " * right_indent
def get_model_table_from_auto_modules():
    """Render the markdown support table from the auto-module mappings."""
    config_maping_names = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
    model_name_to_config = {
        name: config_maping_names[code]
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if code in config_maping_names
    }
    model_name_to_prefix = {name: config.replace("Config", "") for name, config in model_name_to_config.items()}
    # Dictionaries flagging if each model prefix has a slow/fast tokenizer, backend in PT/TF/Flax.
    slow_tokenizers = collections.defaultdict(bool)
    fast_tokenizers = collections.defaultdict(bool)
    pt_models = collections.defaultdict(bool)
    tf_models = collections.defaultdict(bool)
    flax_models = collections.defaultdict(bool)
    # Let's lookup through all transformers objects (once).
    for attr_name in dir(transformers_module):
        lookup_dict = None
        if attr_name.endswith("Tokenizer"):
            lookup_dict = slow_tokenizers
            attr_name = attr_name[:-9]
        elif attr_name.endswith("TokenizerFast"):
            lookup_dict = fast_tokenizers
            attr_name = attr_name[:-13]
        elif _re_tf_models.match(attr_name) is not None:
            lookup_dict = tf_models
            attr_name = _re_tf_models.match(attr_name).groups()[0]
        elif _re_flax_models.match(attr_name) is not None:
            lookup_dict = flax_models
            attr_name = _re_flax_models.match(attr_name).groups()[0]
        elif _re_pt_models.match(attr_name) is not None:
            lookup_dict = pt_models
            attr_name = _re_pt_models.match(attr_name).groups()[0]
        if lookup_dict is not None:
            while len(attr_name) > 0:
                if attr_name in model_name_to_prefix.values():
                    lookup_dict[attr_name] = True
                    break
                # Try again after removing the last word in the name
                attr_name = "".join(camel_case_split(attr_name)[:-1])
    # Let's build that table!
    model_names = list(model_name_to_config.keys())
    model_names.sort(key=str.lower)
    columns = ["Model", "Tokenizer slow", "Tokenizer fast", "PyTorch support", "TensorFlow support", "Flax Support"]
    # We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side).
    widths = [len(c) + 2 for c in columns]
    widths[0] = max([len(name) for name in model_names]) + 2
    # Build the table per se
    table = "|" + "|".join([_center_text(c, w) for c, w in zip(columns, widths)]) + "|\n"
    # Use ":-----:" format to center-align table cell texts
    table += "|" + "|".join([":" + "-" * (w - 2) + ":" for w in widths]) + "|\n"
    check = {True: "✅", False: "❌"}
    for name in model_names:
        prefix = model_name_to_prefix[name]
        line = [
            name,
            check[slow_tokenizers[prefix]],
            check[fast_tokenizers[prefix]],
            check[pt_models[prefix]],
            check[tf_models[prefix]],
            check[flax_models[prefix]],
        ]
        table += "|" + "|".join([_center_text(l, w) for l, w in zip(line, widths)]) + "|\n"
    return table
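# Illustration (not in the original): for a column width of 7, the alignment row
# built above renders the centered-cell marker ":-----:".
assert ":" + "-" * (7 - 2) + ":" == ":-----:"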
def check_model_table(overwrite=False):
    """Check the model table in `index.md` is up to date; rewrite it when `overwrite` is set."""
    current_table, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_DOCS, "index.md"),
        start_prompt="<!--This table is updated automatically from the auto modules",
        end_prompt="<!-- End table-->",
    )
    new_table = get_model_table_from_auto_modules()
    if current_table != new_table:
        if overwrite:
            with open(os.path.join(PATH_TO_DOCS, "index.md"), "w", encoding="utf-8", newline="\n") as f:
                f.writelines(lines[:start_index] + [new_table] + lines[end_index:])
        else:
            raise ValueError(
                "The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this."
            )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()
    check_model_table(args.fix_and_overwrite)
| 334 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_gpt_sw3"] = ["GPTSw3Tokenizer"]

if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_gpt_sw3 import GPTSw3Tokenizer
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
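# Simplified sketch (not transformers' real implementation) of the idea behind
# _LazyModule: attribute access triggers the actual submodule import on demand.
import importlib
import types


class _SketchLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr):
        # Import the submodule that declares `attr` only when it is first requested.
        for module_name, attrs in self._import_structure.items():
            if attr in attrs:
                module = importlib.import_module("." + module_name, self.__name__)
                return getattr(module, attr)
        raise AttributeError(attr)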
| 334 | 1 |
import argparse
import hashlib
import io
import os
import urllib.request
import warnings
import torch
from torch import nn
from tqdm import tqdm
from transformers import WhisperConfig, WhisperForConditionalGeneration
_MODELS = {
'''tiny.en''': '''https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt''',
'''tiny''': '''https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt''',
'''base.en''': '''https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt''',
'''base''': '''https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt''',
'''small.en''': '''https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt''',
'''small''': '''https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt''',
'''medium.en''': '''https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt''',
'''medium''': '''https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt''',
'''large''': '''https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt''',
'''large-v2''': '''https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt''',
}
def remove_ignore_keys_(state_dict):
    """Drop top-level keys that have no counterpart in the HF model (in place)."""
    ignore_keys = ["layers", "blocks"]
    for k in ignore_keys:
        state_dict.pop(k, None)
WHISPER_MAPPING = {
'''blocks''': '''layers''',
'''mlp.0''': '''fc1''',
'''mlp.2''': '''fc2''',
'''mlp_ln''': '''final_layer_norm''',
'''.attn.query''': '''.self_attn.q_proj''',
'''.attn.key''': '''.self_attn.k_proj''',
'''.attn.value''': '''.self_attn.v_proj''',
'''.attn_ln''': '''.self_attn_layer_norm''',
'''.attn.out''': '''.self_attn.out_proj''',
'''.cross_attn.query''': '''.encoder_attn.q_proj''',
'''.cross_attn.key''': '''.encoder_attn.k_proj''',
'''.cross_attn.value''': '''.encoder_attn.v_proj''',
'''.cross_attn_ln''': '''.encoder_attn_layer_norm''',
'''.cross_attn.out''': '''.encoder_attn.out_proj''',
'''decoder.ln.''': '''decoder.layer_norm.''',
'''encoder.ln.''': '''encoder.layer_norm.''',
'''token_embedding''': '''embed_tokens''',
'''encoder.positional_embedding''': '''encoder.embed_positions.weight''',
'''decoder.positional_embedding''': '''decoder.embed_positions.weight''',
'''ln_post''': '''layer_norm''',
}
def rename_keys(s_dict):
    """Rewrite OpenAI checkpoint key names to the transformers naming scheme (in place)."""
    keys = list(s_dict.keys())
    for key in keys:
        new_key = key
        for k, v in WHISPER_MAPPING.items():
            if k in key:
                new_key = new_key.replace(k, v)
        print(f"{key} -> {new_key}")
        s_dict[new_key] = s_dict.pop(key)
    return s_dict
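# Illustration (not part of the original script) of how WHISPER_MAPPING rewrites a
# checkpoint key: "blocks" -> "layers" and "mlp.0" -> "fc1".
assert (
    "decoder.blocks.0.mlp.0.weight".replace("blocks", "layers").replace("mlp.0", "fc1")
    == "decoder.layers.0.fc1.weight"
)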
def make_linear_from_emb(emb):
    """Build a bias-free linear layer that shares its weights with `emb` (used to tie the LM head)."""
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
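# Quick sanity sketch (not in the original): the layer returned above reuses the
# embedding's weight tensor, which is how the output projection gets tied.
_demo_emb = nn.Embedding(4, 2)
assert make_linear_from_emb(_demo_emb).weight.shape == _demo_emb.weight.shape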
def _download(url: str, root: str = ".") -> bytes:
    """Download `url` into `root` with a SHA256 check and return the raw bytes."""
    # NOTE: the default cache directory "." is an assumption; the caller below does not pass `root`.
    os.makedirs(root, exist_ok=True)
    filename = os.path.basename(url)
    expected_sha256 = url.split("/")[-2]
    download_target = os.path.join(root, filename)
    if os.path.exists(download_target) and not os.path.isfile(download_target):
        raise RuntimeError(f"{download_target} exists and is not a regular file")
    if os.path.isfile(download_target):
        model_bytes = open(download_target, "rb").read()
        if hashlib.sha256(model_bytes).hexdigest() == expected_sha256:
            return model_bytes
        else:
            warnings.warn(f"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file")
    with urllib.request.urlopen(url) as source, open(download_target, "wb") as output:
        with tqdm(
            total=int(source.info().get("Content-Length")), ncols=80, unit="iB", unit_scale=True, unit_divisor=1024
        ) as loop:
            while True:
                buffer = source.read(8192)
                if not buffer:
                    break
                output.write(buffer)
                loop.update(len(buffer))
    model_bytes = open(download_target, "rb").read()
    if hashlib.sha256(model_bytes).hexdigest() != expected_sha256:
        raise RuntimeError(
            "Model has been downloaded but the SHA256 checksum does not match. Please retry loading the model."
        )
    return model_bytes
def convert_openai_whisper_to_tfms(checkpoint_path, pytorch_dump_folder_path):
    """Convert an OpenAI Whisper checkpoint into a transformers WhisperForConditionalGeneration."""
    if ".pt" not in checkpoint_path:
        # a model name like "tiny.en": fetch the bytes and load the checkpoint from memory
        original_checkpoint = torch.load(io.BytesIO(_download(_MODELS[checkpoint_path])), map_location="cpu")
    else:
        original_checkpoint = torch.load(checkpoint_path, map_location="cpu")
    dimensions = original_checkpoint["dims"]
    state_dict = original_checkpoint["model_state_dict"]
    proj_out_weights = state_dict["decoder.token_embedding.weight"]
    remove_ignore_keys_(state_dict)
    rename_keys(state_dict)
    tie_embeds = True
    ffn_dim = state_dict["decoder.layers.0.fc1.weight"].shape[0]
    config = WhisperConfig(
        vocab_size=dimensions["n_vocab"],
        encoder_ffn_dim=ffn_dim,
        decoder_ffn_dim=ffn_dim,
        num_mel_bins=dimensions["n_mels"],
        d_model=dimensions["n_audio_state"],
        max_target_positions=dimensions["n_text_ctx"],
        encoder_layers=dimensions["n_audio_layer"],
        encoder_attention_heads=dimensions["n_audio_head"],
        decoder_layers=dimensions["n_text_layer"],
        decoder_attention_heads=dimensions["n_text_head"],
        max_source_positions=dimensions["n_audio_ctx"],
    )
    model = WhisperForConditionalGeneration(config)
    missing, unexpected = model.model.load_state_dict(state_dict, strict=False)
    if len(missing) > 0 and not set(missing) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            "Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"
            f" but all the following weights are missing {missing}"
        )
    if tie_embeds:
        model.proj_out = make_linear_from_emb(model.model.decoder.embed_tokens)
    else:
        model.proj_out.weight.data = proj_out_weights
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--checkpoint_path", type=str, help="Path to the downloaded checkpoints")
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
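# Example invocation (script file name and output path are placeholders, not from the original):
#   python convert_openai_to_hf.py --checkpoint_path tiny.en --pytorch_dump_folder_path ./whisper-tiny.en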
| 18 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def main():
    """Entry point of the Accelerate CLI: register subcommands, parse, dispatch."""
    parser = ArgumentParser("Accelerate CLI tool", usage="accelerate <command> [<args>]", allow_abbrev=False)
    subparsers = parser.add_subparsers(help="accelerate command helpers")
    # Register commands
    get_config_parser(subparsers=subparsers)
    env_command_parser(subparsers=subparsers)
    launch_command_parser(subparsers=subparsers)
    tpu_command_parser(subparsers=subparsers)
    test_command_parser(subparsers=subparsers)
    # Let's go
    args = parser.parse_args()
    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)
    # Run
    args.func(args)


if __name__ == "__main__":
    main()
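# Minimal sketch (not part of accelerate) of the subcommand-dispatch pattern used above:
# each subparser registers a `func` default, and `main` simply calls it.
def _sketch_dispatch(argv):
    toy_parser = ArgumentParser(prog="toy")
    toy_subparsers = toy_parser.add_subparsers()
    hello = toy_subparsers.add_parser("hello")
    hello.set_defaults(func=lambda a: "hi")
    toy_args = toy_parser.parse_args(argv)
    return toy_args.func(toy_args)


assert _sketch_dispatch(["hello"]) == "hi"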
| 18 | 1 |
'''simple docstring'''
import argparse
import requests
import torch
from PIL import Image
from transformers import SwinConfig, SwinForMaskedImageModeling, ViTImageProcessor
def get_swin_config(model_name):
    """Build a SwinConfig for the base or large SimMIM variant named by `model_name`."""
    config = SwinConfig(image_size=192)
    if "base" in model_name:
        window_size = 6
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    elif "large" in model_name:
        window_size = 12
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
    else:
        raise ValueError("Model not supported, only supports base and large variants")
    config.window_size = window_size
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads
    return config
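# Quick check (illustrative, not in the original): a "base" model name selects a
# window size of 6 and an embedding dimension of 128.
assert get_swin_config("swin-base-simmim-window6-192").window_size == 6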
def rename_key(name):
    """Map an original SimMIM parameter name to its transformers counterpart."""
    if "encoder.mask_token" in name:
        name = name.replace("encoder.mask_token", "embeddings.mask_token")
    if "encoder.patch_embed.proj" in name:
        name = name.replace("encoder.patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "encoder.patch_embed.norm" in name:
        name = name.replace("encoder.patch_embed.norm", "embeddings.norm")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if name == "encoder.norm.weight":
        name = "layernorm.weight"
    if name == "encoder.norm.bias":
        name = "layernorm.bias"
    if "decoder" in name:
        pass
    else:
        name = "swin." + name
    return name
def convert_state_dict(orig_state_dict, model):
    """Split fused qkv matrices and rename keys so the checkpoint fits the HF Swin layout."""
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if "attn_mask" in key:
            pass
        elif "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[2])
            block_num = int(key_split[4])
            dim = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
            if "weight" in key:
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"
                ] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val
    return orig_state_dict
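# Shape illustration (not from the original script): a fused qkv weight of shape
# (3 * dim, dim) splits into three (dim, dim) blocks exactly as done above.
_demo_qkv = torch.zeros(6, 2)  # dim = 2
assert _demo_qkv[:2, :].shape == (2, 2)
assert _demo_qkv[2:4, :].shape == (2, 2)
assert _demo_qkv[-2:, :].shape == (2, 2)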
def convert_swin_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub):
    """Convert a SimMIM Swin checkpoint and optionally save/push the HF model."""
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    config = get_swin_config(model_name)
    model = SwinForMaskedImageModeling(config)
    model.eval()
    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image_processor = ViTImageProcessor(size={"height": 192, "width": 192})
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors="pt")
    with torch.no_grad():
        outputs = model(**inputs)  # keep the full output object so its keys can be inspected below
    print(outputs.keys())
    print("Looks ok!")
    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving image processor to {pytorch_dump_folder_path}")
        image_processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print(f"Pushing model and image processor for {model_name} to hub")
        model.push_to_hub(f"microsoft/{model_name}")
        image_processor.push_to_hub(f"microsoft/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="swin-base-simmim-window6-192",
        type=str,
        choices=["swin-base-simmim-window6-192", "swin-large-simmim-window12-192"],
        help="Name of the Swin SimMIM model you'd like to convert.",
    )
    parser.add_argument(
        "--checkpoint_path",
        default="/Users/nielsrogge/Documents/SwinSimMIM/simmim_pretrain__swin_base__img192_window6__100ep.pth",
        type=str,
        help="Path to the original PyTorch checkpoint (.pth file).",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )
    args = parser.parse_args()
    convert_swin_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
| 357 |
'''simple docstring'''
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class BridgeTowerProcessor(ProcessorMixin):
    r"""Wraps a BridgeTower image processor and a RoBERTa tokenizer into a single processor."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BridgeTowerImageProcessor"
    tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")

    def __init__(self, image_processor, tokenizer):
        super().__init__(image_processor, tokenizer)

    def __call__(
        self, images, text=None, add_special_tokens=True, padding=False, truncation=None, max_length=None,
        stride=0, pad_to_multiple_of=None, return_token_type_ids=None, return_attention_mask=None,
        return_overflowing_tokens=False, return_special_tokens_mask=False, return_offsets_mapping=False,
        return_length=False, verbose=True, return_tensors=None, **kwargs,
    ) -> BatchEncoding:
        encoding = self.tokenizer(
            text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation,
            max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping, return_length=return_length,
            verbose=verbose, return_tensors=return_tensors, **kwargs,
        )
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(
            images, return_tensors=return_tensors, do_normalize=True, do_center_crop=True, **kwargs
        )
        encoding.update(encoding_image_processor)
        return encoding

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
| 283 | 0 |
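# Illustrative usage (the checkpoint id is an assumption, not taken from this file):
# processor = BridgeTowerProcessor.from_pretrained("BridgeTower/bridgetower-base")
# inputs = processor(images=image, text="a photo of a cat", return_tensors="pt")
# -> inputs holds input_ids/attention_mask from the tokenizer plus pixel_values/pixel_mask.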