"""simple docstring"""
def UpperCAmelCase ( UpperCAmelCase ) -> List[Any]:
snake_case_ = set()
# To detect a back edge, keep track of vertices currently in the recursion stack
snake_case_ = set()
return any(
node not in visited and depth_first_search(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
for node in graph )
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> List[str]:
visited.add(__lowerCAmelCase )
rec_stk.add(__lowerCAmelCase )
for node in graph[vertex]:
if node not in visited:
if depth_first_search(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
return True
elif node in rec_stk:
return True
# The node needs to be removed from recursion stack before function ends
rec_stk.remove(__lowerCAmelCase )
return False
if __name__ == "__main__":
from doctest import testmod
testmod()
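# --- Usage sketch (added for illustration; not part of the original file).
# The sample graphs are assumptions: in the first, 0 -> 1 -> 2 -> 0 closes a cycle.
if __name__ == "__main__":
    print(check_cycle({0: [1], 1: [2], 2: [0]}))  # True
    print(check_cycle({0: [1], 1: [2], 2: []}))  # False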
"""simple docstring"""
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionImgaImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class UpperCamelCase ( lowerCAmelCase__ , unittest.TestCase ):
SCREAMING_SNAKE_CASE_ = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"
def a_ ( self, lowerCAmelCase__=0) -> List[Any]:
snake_case_ = floats_tensor((1, 3, 128, 128), rng=random.Random(lowerCAmelCase__))
snake_case_ = np.random.RandomState(lowerCAmelCase__)
snake_case_ = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'generator': generator,
'num_inference_steps': 3,
'strength': 0.75,
'guidance_scale': 7.5,
'output_type': 'numpy',
}
return inputs
def a_ ( self) -> Optional[Any]:
snake_case_ = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
pipe.set_progress_bar_config(disable=lowerCAmelCase__)
snake_case_ = self.get_dummy_inputs()
snake_case_ = pipe(**lowerCAmelCase__).images
snake_case_ = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 128, 128, 3)
snake_case_ = np.array([0.69643, 0.58484, 0.50314, 0.58760, 0.55368, 0.59643, 0.51529, 0.41217, 0.49087])
assert np.abs(image_slice - expected_slice).max() < 1e-1
def a_ ( self) -> List[str]:
snake_case_ = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
snake_case_ = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=lowerCAmelCase__)
pipe.set_progress_bar_config(disable=lowerCAmelCase__)
snake_case_ = self.get_dummy_inputs()
snake_case_ = pipe(**lowerCAmelCase__).images
snake_case_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
snake_case_ = np.array([0.61737, 0.54642, 0.53183, 0.54465, 0.52742, 0.60525, 0.49969, 0.40655, 0.48154])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
def a_ ( self) -> str:
snake_case_ = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
snake_case_ = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
pipe.set_progress_bar_config(disable=lowerCAmelCase__)
# warmup pass to apply optimizations
snake_case_ = pipe(**self.get_dummy_inputs())
snake_case_ = self.get_dummy_inputs()
snake_case_ = pipe(**lowerCAmelCase__).images
snake_case_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
snake_case_ = np.array([0.52761, 0.59977, 0.49033, 0.49619, 0.54282, 0.50311, 0.47600, 0.40918, 0.45203])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
def a_ ( self) -> int:
snake_case_ = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
snake_case_ = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
pipe.set_progress_bar_config(disable=lowerCAmelCase__)
snake_case_ = self.get_dummy_inputs()
snake_case_ = pipe(**lowerCAmelCase__).images
snake_case_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
snake_case_ = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
def a_ ( self) -> Dict:
snake_case_ = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
snake_case_ = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
pipe.set_progress_bar_config(disable=lowerCAmelCase__)
snake_case_ = self.get_dummy_inputs()
snake_case_ = pipe(**lowerCAmelCase__).images
snake_case_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
snake_case_ = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
def a_ ( self) -> Dict:
snake_case_ = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
snake_case_ = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe.set_progress_bar_config(disable=lowerCAmelCase__)
snake_case_ = self.get_dummy_inputs()
snake_case_ = pipe(**lowerCAmelCase__).images
snake_case_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
snake_case_ = np.array([0.65331, 0.58277, 0.48204, 0.56059, 0.53665, 0.56235, 0.50969, 0.40009, 0.46552])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class UpperCamelCase ( unittest.TestCase ):
@property
def a_ ( self) -> int:
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def a_ ( self) -> str:
snake_case_ = ort.SessionOptions()
snake_case_ = False
return options
def a_ ( self) -> Any:
snake_case_ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/img2img/sketch-mountains-input.jpg')
snake_case_ = init_image.resize((768, 512))
# using the PNDM scheduler by default
snake_case_ = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4', revision='onnx', safety_checker=lowerCAmelCase__, feature_extractor=lowerCAmelCase__, provider=self.gpu_provider, sess_options=self.gpu_options, )
pipe.set_progress_bar_config(disable=lowerCAmelCase__)
snake_case_ = 'A fantasy landscape, trending on artstation'
snake_case_ = np.random.RandomState(0)
snake_case_ = pipe(
prompt=lowerCAmelCase__, image=lowerCAmelCase__, strength=0.75, guidance_scale=7.5, num_inference_steps=10, generator=lowerCAmelCase__, output_type='np', )
snake_case_ = output.images
snake_case_ = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 768, 3)
snake_case_ = np.array([0.4909, 0.5059, 0.5372, 0.4623, 0.4876, 0.5049, 0.4820, 0.4956, 0.5019])
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
def a_ ( self) -> List[Any]:
snake_case_ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/img2img/sketch-mountains-input.jpg')
snake_case_ = init_image.resize((768, 512))
snake_case_ = LMSDiscreteScheduler.from_pretrained(
'runwayml/stable-diffusion-v1-5', subfolder='scheduler', revision='onnx')
snake_case_ = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5', revision='onnx', scheduler=lowerCAmelCase__, safety_checker=lowerCAmelCase__, feature_extractor=lowerCAmelCase__, provider=self.gpu_provider, sess_options=self.gpu_options, )
pipe.set_progress_bar_config(disable=lowerCAmelCase__)
snake_case_ = 'A fantasy landscape, trending on artstation'
snake_case_ = np.random.RandomState(0)
snake_case_ = pipe(
prompt=lowerCAmelCase__, image=lowerCAmelCase__, strength=0.75, guidance_scale=7.5, num_inference_steps=20, generator=lowerCAmelCase__, output_type='np', )
snake_case_ = output.images
snake_case_ = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 768, 3)
snake_case_ = np.array([0.8043, 0.926, 0.9581, 0.8119, 0.8954, 0.913, 0.7209, 0.7463, 0.7431])
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
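# --- Usage sketch (added; not part of the original test file). A standalone,
# CPU-only run of the tiny checkpoint exercised above; everything here mirrors
# get_dummy_inputs(), the script framing itself is an illustrative assumption.
#
#     pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
#         "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline",
#         provider="CPUExecutionProvider",
#     )
#     image = floats_tensor((1, 3, 128, 128), rng=random.Random(0))
#     out = pipe(prompt="A painting of a squirrel eating a burger", image=image,
#                generator=np.random.RandomState(0), num_inference_steps=3,
#                strength=0.75, guidance_scale=7.5, output_type="numpy")
#     print(out.images.shape)  # (1, 128, 128, 3)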
from __future__ import annotations

from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass


@dataclass
class Edge:
    """Weighted directed graph edge."""

    destination_vertex: int
    weight: int


class AdjacencyList:
    """Graph adjacency list."""

    def __init__(self, size: int):
        self._graph: list[list[Edge]] = [[] for _ in range(size)]
        self._size = size

    def __getitem__(self, vertex: int) -> Iterator[Edge]:
        """Get all the vertices adjacent to the given one."""
        return iter(self._graph[vertex])

    @property
    def size(self) -> int:
        return self._size

    def add_edge(self, from_vertex: int, to_vertex: int, weight: int) -> None:
        if weight not in (0, 1):
            raise ValueError("Edge weight must be either 0 or 1.")

        if to_vertex < 0 or to_vertex >= self.size:
            raise ValueError("Vertex indexes must be in [0; size).")

        self._graph[from_vertex].append(Edge(to_vertex, weight))

    def get_shortest_path(self, start_vertex: int, finish_vertex: int) -> int | None:
        """0-1 BFS: a deque plays the role of the priority queue in Dijkstra's algorithm."""
        queue = deque([start_vertex])
        distances: list[int | None] = [None] * self.size
        distances[start_vertex] = 0

        while queue:
            current_vertex = queue.popleft()
            current_distance = distances[current_vertex]
            if current_distance is None:
                continue

            for edge in self[current_vertex]:
                new_distance = current_distance + edge.weight
                dest_vertex_distance = distances[edge.destination_vertex]
                if (
                    isinstance(dest_vertex_distance, int)
                    and new_distance >= dest_vertex_distance
                ):
                    continue
                distances[edge.destination_vertex] = new_distance
                if edge.weight == 0:
                    # 0-weight edges keep the distance unchanged, so explore them first
                    queue.appendleft(edge.destination_vertex)
                else:
                    queue.append(edge.destination_vertex)

        if distances[finish_vertex] is None:
            raise ValueError("No path from start_vertex to finish_vertex.")

        return distances[finish_vertex]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
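# --- Usage sketch (added; not part of the original file). The graph below is an
# illustrative assumption: the 0-weight edges let 0-1 BFS reach vertex 3 at cost 1.
if __name__ == "__main__":
    g = AdjacencyList(4)
    g.add_edge(0, 1, 0)
    g.add_edge(1, 2, 1)
    g.add_edge(0, 2, 1)
    g.add_edge(2, 3, 0)
    print(g.get_shortest_path(0, 3))  # 1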
"""simple docstring"""
import io
import math
from typing import Dict, Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import convert_to_rgb, normalize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
get_image_size,
infer_channel_dimension_format,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_vision_available, logging
from ...utils.import_utils import requires_backends
if is_vision_available():
import textwrap
from PIL import Image, ImageDraw, ImageFont
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
__UpperCamelCase = False
__UpperCamelCase = logging.get_logger(__name__)
__UpperCamelCase = '''ybelkada/fonts'''
def UpperCAmelCase ( ) -> Dict:
if is_torch_available() and not is_torch_greater_or_equal_than_1_11:
raise ImportError(
f'You are using torch=={torch.__version__}, but torch>=1.11.0 is required to use '
'Pix2StructImageProcessor. Please upgrade torch.' )
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> str:
requires_backends(UpperCAmelCase , ['torch'] )
_check_torch_version()
snake_case_ = image_tensor.unsqueeze(0 )
snake_case_ = torch.nn.functional.unfold(UpperCAmelCase , (patch_height, patch_width) , stride=(patch_height, patch_width) )
snake_case_ = patches.reshape(image_tensor.size(0 ) , image_tensor.size(1 ) , UpperCAmelCase , UpperCAmelCase , -1 )
snake_case_ = patches.permute(0 , 4 , 2 , 3 , 1 ).reshape(
image_tensor.size(2 ) // patch_height , image_tensor.size(3 ) // patch_width , image_tensor.size(1 ) * patch_height * patch_width , )
return patches.unsqueeze(0 )
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase = 36 , UpperCAmelCase = "black" , UpperCAmelCase = "white" , UpperCAmelCase = 5 , UpperCAmelCase = 5 , UpperCAmelCase = 5 , UpperCAmelCase = 5 , UpperCAmelCase = None , UpperCAmelCase = None , ) -> Image.Image:
requires_backends(UpperCAmelCase , 'vision' )
# Add new lines so that each line is no more than 80 characters.
snake_case_ = textwrap.TextWrapper(width=80 )
snake_case_ = wrapper.wrap(text=UpperCAmelCase )
snake_case_ = '\n'.join(UpperCAmelCase )
if font_bytes is not None and font_path is None:
snake_case_ = io.BytesIO(UpperCAmelCase )
elif font_path is not None:
snake_case_ = font_path
else:
snake_case_ = hf_hub_download(UpperCAmelCase , 'Arial.TTF' )
snake_case_ = ImageFont.truetype(UpperCAmelCase , encoding='UTF-8' , size=UpperCAmelCase )
# Use a temporary canvas to determine the width and height in pixels when
# rendering the text.
snake_case_ = ImageDraw.Draw(Image.new('RGB' , (1, 1) , UpperCAmelCase ) )
snake_case_ , snake_case_ , snake_case_ , snake_case_ = temp_draw.textbbox((0, 0) , UpperCAmelCase , UpperCAmelCase )
# Create the actual image with a bit of padding around the text.
snake_case_ = text_width + left_padding + right_padding
snake_case_ = text_height + top_padding + bottom_padding
snake_case_ = Image.new('RGB' , (image_width, image_height) , UpperCAmelCase )
snake_case_ = ImageDraw.Draw(UpperCAmelCase )
draw.text(xy=(left_padding, top_padding) , text=UpperCAmelCase , fill=UpperCAmelCase , font=UpperCAmelCase )
return image
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ) -> Any:
requires_backends(UpperCAmelCase , 'vision' )
# Convert to PIL image if necessary
snake_case_ = to_pil_image(UpperCAmelCase )
snake_case_ = render_text(UpperCAmelCase , **UpperCAmelCase )
snake_case_ = max(header_image.width , image.width )
snake_case_ = int(image.height * (new_width / image.width) )
snake_case_ = int(header_image.height * (new_width / header_image.width) )
snake_case_ = Image.new('RGB' , (new_width, new_height + new_header_height) , 'white' )
new_image.paste(header_image.resize((new_width, new_header_height) ) , (0, 0) )
new_image.paste(image.resize((new_width, new_height) ) , (0, new_header_height) )
# Convert back to the original framework if necessary
snake_case_ = to_numpy_array(UpperCAmelCase )
if infer_channel_dimension_format(UpperCAmelCase ) == ChannelDimension.LAST:
snake_case_ = to_channel_dimension_format(UpperCAmelCase , ChannelDimension.LAST )
return new_image
class UpperCamelCase ( lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE_ = ["flattened_patches"]
def __init__( self, lowerCAmelCase__ = True, lowerCAmelCase__ = True, lowerCAmelCase__ = None, lowerCAmelCase__ = 2048, lowerCAmelCase__ = False, **lowerCAmelCase__, ) -> None:
super().__init__(**lowerCAmelCase__)
snake_case_ = patch_size if patch_size is not None else {'height': 16, 'width': 16}
snake_case_ = do_normalize
snake_case_ = do_convert_rgb
snake_case_ = max_patches
snake_case_ = is_vqa
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, **lowerCAmelCase__) -> np.ndarray:
requires_backends(self.extract_flattened_patches, 'torch')
_check_torch_version()
# convert to torch
snake_case_ = to_channel_dimension_format(lowerCAmelCase__, ChannelDimension.FIRST)
snake_case_ = torch.from_numpy(lowerCAmelCase__)
snake_case_ , snake_case_ = patch_size['height'], patch_size['width']
snake_case_ , snake_case_ = get_image_size(lowerCAmelCase__)
# maximize scale s.t.
snake_case_ = math.sqrt(max_patches * (patch_height / image_height) * (patch_width / image_width))
snake_case_ = max(min(math.floor(scale * image_height / patch_height), lowerCAmelCase__), 1)
snake_case_ = max(min(math.floor(scale * image_width / patch_width), lowerCAmelCase__), 1)
snake_case_ = max(num_feasible_rows * patch_height, 1)
snake_case_ = max(num_feasible_cols * patch_width, 1)
snake_case_ = torch.nn.functional.interpolate(
image.unsqueeze(0), size=(resized_height, resized_width), mode='bilinear', align_corners=lowerCAmelCase__, antialias=lowerCAmelCase__, ).squeeze(0)
# [1, rows, columns, patch_height * patch_width * image_channels]
snake_case_ = torch_extract_patches(lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = patches.shape
snake_case_ = patches_shape[1]
snake_case_ = patches_shape[2]
snake_case_ = patches_shape[3]
# [rows * columns, patch_height * patch_width * image_channels]
snake_case_ = patches.reshape([rows * columns, depth])
# [rows * columns, 1]
snake_case_ = torch.arange(lowerCAmelCase__).reshape([rows, 1]).repeat(1, lowerCAmelCase__).reshape([rows * columns, 1])
snake_case_ = torch.arange(lowerCAmelCase__).reshape([1, columns]).repeat(lowerCAmelCase__, 1).reshape([rows * columns, 1])
# Offset by 1 so the ids do not contain zeros, which represent padding.
row_ids += 1
col_ids += 1
# Prepare additional patch features.
# [rows * columns, 1]
snake_case_ = row_ids.to(torch.floataa)
snake_case_ = col_ids.to(torch.floataa)
# [rows * columns, 2 + patch_height * patch_width * image_channels]
snake_case_ = torch.cat([row_ids, col_ids, patches], -1)
# [max_patches, 2 + patch_height * patch_width * image_channels]
snake_case_ = torch.nn.functional.pad(lowerCAmelCase__, [0, 0, 0, max_patches - (rows * columns)]).float()
snake_case_ = to_numpy_array(lowerCAmelCase__)
return result
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None, **lowerCAmelCase__) -> np.ndarray:
if image.dtype == np.uinta:
snake_case_ = image.astype(np.floataa)
# take mean across the whole `image`
snake_case_ = np.mean(lowerCAmelCase__)
snake_case_ = np.std(lowerCAmelCase__)
snake_case_ = max(lowerCAmelCase__, 1.0 / math.sqrt(np.prod(image.shape)))
return normalize(lowerCAmelCase__, mean=lowerCAmelCase__, std=lowerCAmelCase__, **lowerCAmelCase__)
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__ = ChannelDimension.FIRST, **lowerCAmelCase__, ) -> ImageInput:
snake_case_ = do_normalize if do_normalize is not None else self.do_normalize
snake_case_ = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
snake_case_ = patch_size if patch_size is not None else self.patch_size
snake_case_ = max_patches if max_patches is not None else self.max_patches
snake_case_ = self.is_vqa
if kwargs.get('data_format', lowerCAmelCase__) is not None:
raise ValueError('data_format is not an accepted input as the outputs are ')
snake_case_ = make_list_of_images(lowerCAmelCase__)
if not valid_images(lowerCAmelCase__):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.')
# PIL RGBA images are converted to RGB
if do_convert_rgb:
snake_case_ = [convert_to_rgb(lowerCAmelCase__) for image in images]
# All transformations expect numpy arrays.
snake_case_ = [to_numpy_array(lowerCAmelCase__) for image in images]
if is_vqa:
if header_text is None:
raise ValueError('A header text must be provided for VQA models.')
snake_case_ = kwargs.pop('font_bytes', lowerCAmelCase__)
snake_case_ = kwargs.pop('font_path', lowerCAmelCase__)
if isinstance(lowerCAmelCase__, lowerCAmelCase__):
snake_case_ = [header_text] * len(lowerCAmelCase__)
snake_case_ = [
render_header(lowerCAmelCase__, header_text[i], font_bytes=lowerCAmelCase__, font_path=lowerCAmelCase__)
for i, image in enumerate(lowerCAmelCase__)
]
if do_normalize:
snake_case_ = [self.normalize(image=lowerCAmelCase__) for image in images]
# convert to torch tensor and permute
snake_case_ = [
self.extract_flattened_patches(image=lowerCAmelCase__, max_patches=lowerCAmelCase__, patch_size=lowerCAmelCase__)
for image in images
]
# create attention mask in numpy
snake_case_ = [(image.sum(axis=-1) != 0).astype(np.floataa) for image in images]
snake_case_ = BatchFeature(
data={'flattened_patches': images, 'attention_mask': attention_masks}, tensor_type=lowerCAmelCase__)
return encoded_outputs
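# --- Usage sketch (added; not part of the original module). The relative imports
# above only resolve inside transformers, so this is shown commented; the file
# name and the exact output shape are illustrative assumptions (with the default
# 16x16 patches and 3 channels, each patch row is 2 + 16 * 16 * 3 = 770 wide).
#
#     from transformers import Pix2StructImageProcessor
#     from PIL import Image
#
#     processor = Pix2StructImageProcessor(max_patches=1024)
#     encoding = processor(images=Image.open("chart.png"), return_tensors="np")
#     encoding["flattened_patches"].shape  # (1, 1024, 770)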
"""simple docstring"""
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel
class UpperCamelCase ( lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE_ = "M-CLIP"
def __init__( self, lowerCAmelCase__=1024, lowerCAmelCase__=768, **lowerCAmelCase__) -> List[str]:
snake_case_ = transformerDimSize
snake_case_ = imageDimSize
super().__init__(**lowerCAmelCase__)
class UpperCamelCase ( lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE_ = MCLIPConfig
def __init__( self, lowerCAmelCase__, *lowerCAmelCase__, **lowerCAmelCase__) -> str:
super().__init__(lowerCAmelCase__, *lowerCAmelCase__, **lowerCAmelCase__)
snake_case_ = XLMRobertaModel(lowerCAmelCase__)
snake_case_ = torch.nn.Linear(
in_features=config.transformerDimensions, out_features=config.numDims)
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__) -> Tuple:
snake_case_ = self.transformer(input_ids=lowerCAmelCase__, attention_mask=lowerCAmelCase__)[0]
snake_case_ = (embs * attention_mask.unsqueeze(2)).sum(dim=1) / attention_mask.sum(dim=1)[:, None]
return self.LinearTransformation(lowerCAmelCase__), embs
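# --- Usage sketch (added; not part of the original file). The checkpoint name
# and tokenizer pairing are illustrative assumptions.
#
#     from transformers import AutoTokenizer
#
#     tokenizer = AutoTokenizer.from_pretrained("M-CLIP/XLM-Roberta-Large-Vit-B-32")
#     model = MultilingualCLIP.from_pretrained("M-CLIP/XLM-Roberta-Large-Vit-B-32")
#     batch = tokenizer(["en kattunge som sover"], return_tensors="pt")
#     text_embedding, token_embeddings = model(batch["input_ids"], batch["attention_mask"])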
"""simple docstring"""
from math import pi
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> float:
return 2 * pi * radius * (angle / 360)
if __name__ == "__main__":
print(arc_length(90, 10))
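# --- Worked example (added): arc_length(90, 10) = 2 * pi * 10 * (90 / 360)
# = 5 * pi ~= 15.707963, i.e. a quarter of the full circumference 2 * pi * 10.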
"""simple docstring"""
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
execute_subprocess_async,
require_cpu,
require_huggingface_suite,
require_multi_gpu,
require_single_gpu,
)
from accelerate.utils import patch_environment
@require_huggingface_suite
class UpperCamelCase ( unittest.TestCase ):
def a_ ( self) -> Optional[Any]:
snake_case_ = inspect.getfile(accelerate.test_utils)
snake_case_ = os.path.sep.join(
mod_file.split(os.path.sep)[:-1] + ['scripts', 'external_deps', 'test_metrics.py'])
from accelerate.test_utils.scripts.external_deps import test_metrics # noqa: F401
snake_case_ = test_metrics
@require_cpu
def a_ ( self) -> Any:
debug_launcher(self.test_metrics.main, num_processes=1)
@require_cpu
def a_ ( self) -> List[Any]:
debug_launcher(self.test_metrics.main)
@require_single_gpu
def a_ ( self) -> int:
self.test_metrics.main()
@require_multi_gpu
def a_ ( self) -> Optional[int]:
print(f'Found {torch.cuda.device_count()} devices.')
snake_case_ = ['torchrun', f'--nproc_per_node={torch.cuda.device_count()}', self.test_file_path]
with patch_environment(omp_num_threads=1):
execute_subprocess_async(lowerCAmelCase__, env=os.environ.copy())
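# --- Usage note (added; not part of the original file). These tests are normally
# collected by pytest; the path is an assumption based on the repository layout.
#
#     python -m pytest tests/test_metrics.py -q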
"""simple docstring"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging
__UpperCamelCase = logging.get_logger(__name__)
__UpperCamelCase = {
'''CarlCochet/trajectory-transformer-halfcheetah-medium-v2''': (
'''https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json'''
),
# See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer
}
class UpperCamelCase ( lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE_ = "trajectory_transformer"
SCREAMING_SNAKE_CASE_ = ["past_key_values"]
SCREAMING_SNAKE_CASE_ = {
"hidden_size": "n_embd",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self, lowerCAmelCase__=100, lowerCAmelCase__=5, lowerCAmelCase__=1, lowerCAmelCase__=1, lowerCAmelCase__=249, lowerCAmelCase__=6, lowerCAmelCase__=17, lowerCAmelCase__=25, lowerCAmelCase__=4, lowerCAmelCase__=4, lowerCAmelCase__=128, lowerCAmelCase__=0.1, lowerCAmelCase__=0.1, lowerCAmelCase__=0.1, lowerCAmelCase__=0.0006, lowerCAmelCase__=512, lowerCAmelCase__=0.02, lowerCAmelCase__=1e-12, lowerCAmelCase__=1, lowerCAmelCase__=True, lowerCAmelCase__=1, lowerCAmelCase__=5_0256, lowerCAmelCase__=5_0256, **lowerCAmelCase__, ) -> Optional[Any]:
snake_case_ = vocab_size
snake_case_ = action_weight
snake_case_ = reward_weight
snake_case_ = value_weight
snake_case_ = max_position_embeddings
snake_case_ = block_size
snake_case_ = action_dim
snake_case_ = observation_dim
snake_case_ = transition_dim
snake_case_ = learning_rate
snake_case_ = n_layer
snake_case_ = n_head
snake_case_ = n_embd
snake_case_ = embd_pdrop
snake_case_ = attn_pdrop
snake_case_ = resid_pdrop
snake_case_ = initializer_range
snake_case_ = layer_norm_eps
snake_case_ = kaiming_initializer_range
snake_case_ = use_cache
super().__init__(pad_token_id=lowerCAmelCase__, bos_token_id=lowerCAmelCase__, eos_token_id=lowerCAmelCase__, **lowerCAmelCase__)
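# --- Usage sketch (added; not part of the original module). Shown commented
# because of the relative imports; the values are the defaults defined above.
#
#     config = TrajectoryTransformerConfig(n_layer=6, n_head=8)
#     config.hidden_size  # 128: aliased to n_embd via attribute_map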
import re

import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey

from ..utils import logging


logger = logging.get_logger(__name__)


def rename_key(key):
    # Flax uses `_` to nest numbered sub-modules, e.g. `layers.0` -> `layers_0`
    regex = r"\w+[.]\d+"
    pats = re.findall(regex, key)
    for pat in pats:
        key = key.replace(pat, "_".join(pat.split(".")))
    return key


def rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict):
    """Rename PyTorch weight names to the corresponding Flax names and reshape the tensor if necessary."""
    # conv norm or layer norm
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if (
        any("norm" in str_ for str_ in pt_tuple_key)
        and (pt_tuple_key[-1] == "bias")
        and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
        and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
    ):
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor
    elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor

    # embedding
    if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
        return renamed_pt_tuple_key, pt_tensor

    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor

    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight":
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor

    return pt_tuple_key, pt_tensor


def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model, init_key=42):
    # Step 1: Convert pytorch tensor to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}

    # Step 2: Since the model is stateless, get random Flax params
    random_flax_params = flax_model.init_weights(PRNGKey(init_key))

    random_flax_state_dict = flatten_dict(random_flax_params)
    flax_state_dict = {}

    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        renamed_pt_key = rename_key(pt_key)
        pt_tuple_key = tuple(renamed_pt_key.split("."))

        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict)

        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                    f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )

        # also add unexpected weight so that warning is thrown
        flax_state_dict[flax_key] = jnp.asarray(flax_tensor)

    return unflatten_dict(flax_state_dict)
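# --- Usage sketch (added; not part of the original file). Shown commented
# because it needs both torch and flax installed; that `flax_model` exposes
# `init_weights(PRNGKey(...))` comes from the code above, everything else is
# an illustrative assumption.
#
#     pt_state_dict = pt_model.state_dict()  # any diffusers PyTorch model
#     flax_params = convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model)
#     # `flax_params` is a nested dict ready to pass to flax_model.apply(...)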
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class UpperCamelCase ( metaclass=lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE_ = ["keras_nlp"]
def __init__( self, *lowerCAmelCase__, **lowerCAmelCase__) -> int:
requires_backends(self, ['keras_nlp'])
"""simple docstring"""
import argparse
import os
import pickle
import sys
import torch
from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl
from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils
from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
# We do this to be able to load python 2 datasets pickles
# See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918
__UpperCamelCase = data_utils.TransfoXLTokenizer
__UpperCamelCase = data_utils.TransfoXLCorpus
__UpperCamelCase = data_utils
__UpperCamelCase = data_utils
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Any:
if transfo_xl_dataset_file:
# Convert a pre-processed corpus (see original TensorFlow repo)
with open(UpperCAmelCase , 'rb' ) as fp:
snake_case_ = pickle.load(UpperCAmelCase , encoding='latin1' )
# Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term)
snake_case_ = pytorch_dump_folder_path + '/' + VOCAB_FILES_NAMES['pretrained_vocab_file']
print(f'Save vocabulary to {pytorch_vocab_dump_path}' )
snake_case_ = corpus.vocab.__dict__
torch.save(UpperCAmelCase , UpperCAmelCase )
snake_case_ = corpus.__dict__
corpus_dict_no_vocab.pop('vocab' , UpperCAmelCase )
snake_case_ = pytorch_dump_folder_path + '/' + CORPUS_NAME
print(f'Save dataset to {pytorch_dataset_dump_path}' )
torch.save(UpperCAmelCase , UpperCAmelCase )
if tf_checkpoint_path:
# Convert a pre-trained TensorFlow model
snake_case_ = os.path.abspath(UpperCAmelCase )
snake_case_ = os.path.abspath(UpperCAmelCase )
print(f'Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.' )
# Initialise PyTorch model
if transfo_xl_config_file == "":
snake_case_ = TransfoXLConfig()
else:
snake_case_ = TransfoXLConfig.from_json_file(UpperCAmelCase )
print(f'Building PyTorch model from configuration: {config}' )
snake_case_ = TransfoXLLMHeadModel(UpperCAmelCase )
snake_case_ = load_tf_weights_in_transfo_xl(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
# Save pytorch-model
snake_case_ = os.path.join(UpperCAmelCase , UpperCAmelCase )
snake_case_ = os.path.join(UpperCAmelCase , UpperCAmelCase )
print(f'Save PyTorch model to {os.path.abspath(UpperCAmelCase )}' )
torch.save(model.state_dict() , UpperCAmelCase )
print(f'Save configuration file to {os.path.abspath(UpperCAmelCase )}' )
with open(UpperCAmelCase , 'w' , encoding='utf-8' ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
__UpperCamelCase = argparse.ArgumentParser()
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
required=True,
help='''Path to the folder to store the PyTorch model or dataset/vocab.''',
)
parser.add_argument(
'''--tf_checkpoint_path''',
default='''''',
type=str,
help='''An optional path to a TensorFlow checkpoint path to be converted.''',
)
parser.add_argument(
'''--transfo_xl_config_file''',
default='''''',
type=str,
help=(
'''An optional config json file corresponding to the pre-trained BERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--transfo_xl_dataset_file''',
default='''''',
type=str,
help='''An optional dataset file to be converted in a vocabulary.''',
)
__UpperCamelCase = parser.parse_args()
convert_transfo_xl_checkpoint_to_pytorch(
args.tf_checkpoint_path,
args.transfo_xl_config_file,
args.pytorch_dump_folder_path,
args.transfo_xl_dataset_file,
)
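# --- Usage sketch (added; not part of the original script): converting only a
# pre-processed corpus pickle; the script file name and paths are illustrative
# assumptions.
#
#     python convert_transfo_xl_original_tf_checkpoint_to_pytorch.py \
#         --pytorch_dump_folder_path ./transfo-xl-pt \
#         --transfo_xl_dataset_file ./corpus-cached.pkl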
"""simple docstring"""
import os
import numpy
import onnx
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> List[str]:
snake_case_ = a.name
snake_case_ = b.name
snake_case_ = ''
snake_case_ = ''
snake_case_ = a == b
snake_case_ = name_a
snake_case_ = name_b
return res
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> int:
for i, input_name in enumerate(node_proto.input ):
if input_name == name:
node_proto.input.insert(UpperCAmelCase , UpperCAmelCase )
node_proto.input.pop(i + 1 )
if node_proto.op_type == "If":
_graph_replace_input_with(node_proto.attribute[0].g , UpperCAmelCase , UpperCAmelCase )
_graph_replace_input_with(node_proto.attribute[1].g , UpperCAmelCase , UpperCAmelCase )
if node_proto.op_type == "Loop":
_graph_replace_input_with(node_proto.attribute[0].g , UpperCAmelCase , UpperCAmelCase )
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Union[str, Any]:
for n in graph_proto.node:
_node_replace_input_with(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Any:
snake_case_ = list(model.graph.initializer )
snake_case_ = list(model_without_ext.graph.initializer )
for i, ref_i in ind_to_replace:
assert inits_with_data[i].name == inits[i].name
assert inits_with_data[ref_i].name == inits[ref_i].name
assert i > ref_i
snake_case_ = inits[i].name
snake_case_ = inits[ref_i].name
model_without_ext.graph.initializer.remove(inits[i] )
# for n in model.graph.node:
_graph_replace_input_with(model_without_ext.graph , UpperCAmelCase , UpperCAmelCase )
def UpperCAmelCase ( UpperCAmelCase ) -> Optional[Any]:
snake_case_ = os.path.dirname(UpperCAmelCase )
snake_case_ = os.path.basename(UpperCAmelCase )
snake_case_ = onnx.load(os.path.join(UpperCAmelCase , UpperCAmelCase ) )
snake_case_ = list(model.graph.initializer )
snake_case_ = set()
snake_case_ = {}
snake_case_ = []
snake_case_ = 0
for i in range(len(UpperCAmelCase ) ):
if i in dup_set:
continue
for j in range(i + 1 , len(UpperCAmelCase ) ):
if j in dup_set:
continue
if _is_equal_tensor_proto(inits[i] , inits[j] ):
dup_set.add(UpperCAmelCase )
dup_set.add(UpperCAmelCase )
snake_case_ = inits[j].data_type
snake_case_ = numpy.prod(inits[j].dims )
if dtype == 1:
mem_size *= 4
elif dtype == 6:
mem_size *= 4
elif dtype == 7 or dtype == 11:
mem_size *= 8
else:
print('unexpected data type: ' , UpperCAmelCase )
total_reduced_size += mem_size
snake_case_ = inits[i].name
snake_case_ = inits[j].name
if name_i in dup_map:
dup_map[name_i].append(UpperCAmelCase )
else:
snake_case_ = [name_j]
ind_to_replace.append((j, i) )
print('total reduced size: ' , total_reduced_size / 1024 / 1024 / 1024 , 'GB' )
snake_case_ = sorted(UpperCAmelCase )
_remove_dup_initializers_from_model(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
snake_case_ = 'optimized_' + model_file_name
snake_case_ = os.path.join(UpperCAmelCase , UpperCAmelCase )
onnx.save(UpperCAmelCase , UpperCAmelCase )
return new_model
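# --- Usage sketch (added; not part of the original file). The path is an
# illustrative assumption; the optimized copy is written next to the input.
if __name__ == "__main__":
    optimized_path = remove_dup_initializers("model.onnx")
    print("wrote", optimized_path)  # e.g. ./optimized_model.onnx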
"""simple docstring"""
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> str:
snake_case_ = len(UpperCAmelCase )
snake_case_ = len(UpperCAmelCase )
snake_case_ = (
first_str_length if first_str_length > second_str_length else second_str_length
)
snake_case_ = []
for char_count in range(UpperCAmelCase ):
if char_count < first_str_length:
output_list.append(first_str[char_count] )
if char_count < second_str_length:
output_list.append(second_str[char_count] )
return "".join(UpperCAmelCase )
if __name__ == "__main__":
print(alternative_string_arrange('''AB''', '''XYZ'''), end=''' ''')
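# --- Worked example (added): characters are interleaved index by index and the
# longer string's tail is kept, so alternative_string_arrange("AB", "XYZ")
# yields "AXBYZ".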
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_barthez import BarthezTokenizer
else:
__UpperCamelCase = None
__UpperCamelCase = logging.get_logger(__name__)
__UpperCamelCase = {'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''}
__UpperCamelCase = {
'''vocab_file''': {
'''moussaKam/mbarthez''': '''https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model''',
'''moussaKam/barthez''': '''https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model''',
'''moussaKam/barthez-orangesum-title''': (
'''https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model'''
),
},
'''tokenizer_file''': {
'''moussaKam/mbarthez''': '''https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json''',
'''moussaKam/barthez''': '''https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json''',
'''moussaKam/barthez-orangesum-title''': (
'''https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json'''
),
},
}
__UpperCamelCase = {
'''moussaKam/mbarthez''': 1024,
'''moussaKam/barthez''': 1024,
'''moussaKam/barthez-orangesum-title''': 1024,
}
__UpperCamelCase = '''▁'''
class UpperCamelCase ( lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE_ = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE_ = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE_ = ["input_ids", "attention_mask"]
SCREAMING_SNAKE_CASE_ = BarthezTokenizer
def __init__( self, lowerCAmelCase__=None, lowerCAmelCase__=None, lowerCAmelCase__="<s>", lowerCAmelCase__="</s>", lowerCAmelCase__="</s>", lowerCAmelCase__="<s>", lowerCAmelCase__="<unk>", lowerCAmelCase__="<pad>", lowerCAmelCase__="<mask>", **lowerCAmelCase__, ) -> List[str]:
# Mask token behave like a normal word, i.e. include the space before it
snake_case_ = AddedToken(lowerCAmelCase__, lstrip=lowerCAmelCase__, rstrip=lowerCAmelCase__) if isinstance(lowerCAmelCase__, lowerCAmelCase__) else mask_token
super().__init__(
lowerCAmelCase__, tokenizer_file=lowerCAmelCase__, bos_token=lowerCAmelCase__, eos_token=lowerCAmelCase__, unk_token=lowerCAmelCase__, sep_token=lowerCAmelCase__, cls_token=lowerCAmelCase__, pad_token=lowerCAmelCase__, mask_token=lowerCAmelCase__, **lowerCAmelCase__, )
snake_case_ = vocab_file
snake_case_ = False if not self.vocab_file else True
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
snake_case_ = [self.cls_token_id]
snake_case_ = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None) -> List[int]:
snake_case_ = [self.sep_token_id]
snake_case_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None) -> Tuple[str]:
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.')
if not os.path.isdir(lowerCAmelCase__):
logger.error(f'Vocabulary path ({save_directory}) should be a directory')
return
snake_case_ = os.path.join(
lowerCAmelCase__, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
if os.path.abspath(self.vocab_file) != os.path.abspath(lowerCAmelCase__):
copyfile(self.vocab_file, lowerCAmelCase__)
return (out_vocab_file,)
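# --- Usage sketch (added; not part of the original module). Shown commented
# because of the relative imports; the checkpoint comes from the map above,
# the sample sentence is an illustrative assumption.
#
#     from transformers import BarthezTokenizerFast
#
#     tokenizer = BarthezTokenizerFast.from_pretrained("moussaKam/barthez")
#     ids = tokenizer("Un chat sur un banc").input_ids  # <s> ... </s>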
"""simple docstring"""
from __future__ import annotations
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> list[str]:
if partitions <= 0:
raise ValueError('partitions must be a positive number!' )
if partitions > number_of_bytes:
raise ValueError('partitions can not > number_of_bytes!' )
snake_case_ = number_of_bytes // partitions
snake_case_ = []
for i in range(UpperCAmelCase ):
snake_case_ = i * bytes_per_partition + 1
snake_case_ = (
number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition
)
allocation_list.append(f'{start_bytes}-{end_bytes}' )
return allocation_list
if __name__ == "__main__":
import doctest
doctest.testmod()
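# --- Worked example (added): allocation_num(100, 4) splits 100 bytes into
# ['1-25', '26-50', '51-75', '76-100']; with a non-divisible total such as
# allocation_num(10, 3) the last partition absorbs the remainder: '7-10'.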
"""simple docstring"""
import functools
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> int:
# Validation
if not isinstance(UpperCAmelCase , UpperCAmelCase ) or not all(isinstance(UpperCAmelCase , UpperCAmelCase ) for day in days ):
raise ValueError('The parameter days should be a list of integers' )
if len(UpperCAmelCase ) != 3 or not all(isinstance(UpperCAmelCase , UpperCAmelCase ) for cost in costs ):
raise ValueError('The parameter costs should be a list of three integers' )
if len(UpperCAmelCase ) == 0:
return 0
if min(UpperCAmelCase ) <= 0:
raise ValueError('All days elements should be greater than 0' )
if max(UpperCAmelCase ) >= 366:
raise ValueError('All days elements should be less than 366' )
snake_case_ = set(UpperCAmelCase )
@functools.cache
def dynamic_programming(UpperCAmelCase ) -> int:
if index > 365:
return 0
if index not in days_set:
return dynamic_programming(index + 1 )
return min(
costs[0] + dynamic_programming(index + 1 ) , costs[1] + dynamic_programming(index + 7 ) , costs[2] + dynamic_programming(index + 30 ) , )
return dynamic_programming(1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
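# --- Worked example (added): for travel days [1, 4, 6, 7, 8, 20] and pass costs
# [2, 7, 15] (1-, 7- and 30-day passes), the minimum is 11: single-day passes on
# days 1 and 20 plus a 7-day pass covering days 4-10.
if __name__ == "__main__":
    print(mincost_tickets([1, 4, 6, 7, 8, 20], [2, 7, 15]))  # 11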
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


logger = logging.get_logger(__name__)

CONVNEXTV2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/convnextv2-tiny-1k-224": "https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json",
}


class ConvNextV2Config(BackboneConfigMixin, PretrainedConfig):
    model_type = "convnextv2"

    def __init__(self, num_channels=3, patch_size=4, num_stages=4, hidden_sizes=None, depths=None, hidden_act="gelu", initializer_range=0.02, layer_norm_eps=1e-12, drop_path_rate=0.0, image_size=224, out_features=None, out_indices=None, **kwargs):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_stages = num_stages
        self.hidden_sizes = [96, 192, 384, 768] if hidden_sizes is None else hidden_sizes
        self.depths = [3, 3, 9, 3] if depths is None else depths
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.drop_path_rate = drop_path_rate
        self.image_size = image_size
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
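# --- Usage sketch (added; not part of the original module). Shown commented
# because of the relative imports; the values are the defaults defined above.
#
#     from transformers import ConvNextV2Config
#
#     config = ConvNextV2Config()  # hidden_sizes -> [96, 192, 384, 768]
#     config = ConvNextV2Config(out_features=["stage2", "stage4"])  # backbone taps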
"""simple docstring"""
import copy
import re
class UpperCamelCase :
SCREAMING_SNAKE_CASE_ = "hp"
SCREAMING_SNAKE_CASE_ = {}
SCREAMING_SNAKE_CASE_ = None
@classmethod
def a_ ( cls, lowerCAmelCase__, lowerCAmelCase__) -> Tuple:
snake_case_ = prefix
snake_case_ = defaults
cls.build_naming_info()
@staticmethod
def a_ ( lowerCAmelCase__, lowerCAmelCase__) -> Optional[Any]:
if len(lowerCAmelCase__) == 0:
return ""
snake_case_ = None
if any(char.isdigit() for char in word):
raise Exception(f'Parameters should not contain numbers: \'{word}\' contains a number')
if word in info["short_word"]:
return info["short_word"][word]
for prefix_len in range(1, len(lowerCAmelCase__) + 1):
snake_case_ = word[:prefix_len]
if prefix in info["reverse_short_word"]:
continue
else:
snake_case_ = prefix
break
if short_word is None:
# Paranoid fallback
def int_to_alphabetic(lowerCAmelCase__):
snake_case_ = ''
while integer != 0:
snake_case_ = chr(ord('A') + integer % 10) + s
integer //= 10
return s
snake_case_ = 0
while True:
snake_case_ = word + '#' + int_to_alphabetic(lowerCAmelCase__)
if sword in info["reverse_short_word"]:
continue
else:
snake_case_ = sword
break
snake_case_ = short_word
snake_case_ = word
return short_word
@staticmethod
def a_ ( lowerCAmelCase__, lowerCAmelCase__) -> Dict:
snake_case_ = param_name.split('_')
snake_case_ = [TrialShortNamer.shortname_for_word(lowerCAmelCase__, lowerCAmelCase__) for word in words]
# We try to create a separatorless short name, but if there is a collision we have to fallback
# to a separated short name
snake_case_ = ['', '_']
for separator in separators:
snake_case_ = separator.join(lowerCAmelCase__)
if shortname not in info["reverse_short_param"]:
snake_case_ = shortname
snake_case_ = param_name
return shortname
return param_name
@staticmethod
def a_ ( lowerCAmelCase__, lowerCAmelCase__) -> List[Any]:
snake_case_ = TrialShortNamer.shortname_for_key(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = short_name
snake_case_ = param_name
@classmethod
def a_ ( cls) -> List[str]:
if cls.NAMING_INFO is not None:
return
snake_case_ = {
'short_word': {},
'reverse_short_word': {},
'short_param': {},
'reverse_short_param': {},
}
snake_case_ = list(cls.DEFAULTS.keys())
for k in field_keys:
cls.add_new_param_name(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = info
@classmethod
def a_ ( cls, lowerCAmelCase__) -> List[Any]:
cls.build_naming_info()
assert cls.PREFIX is not None
snake_case_ = [copy.copy(cls.PREFIX)]
for k, v in params.items():
if k not in cls.DEFAULTS:
raise Exception(f'You should provide a default value for the param name {k} with value {v}')
if v == cls.DEFAULTS[k]:
# The default value is not added to the name
continue
snake_case_ = cls.NAMING_INFO['short_param'][k]
if isinstance(lowerCAmelCase__, lowerCAmelCase__):
snake_case_ = 1 if v else 0
snake_case_ = '' if isinstance(lowerCAmelCase__, (int, float)) else '-'
snake_case_ = f'{key}{sep}{v}'
name.append(lowerCAmelCase__)
return "_".join(lowerCAmelCase__)
@classmethod
def a_ ( cls, lowerCAmelCase__) -> Optional[Any]:
snake_case_ = repr[len(cls.PREFIX) + 1 :]
if repr == "":
snake_case_ = []
else:
snake_case_ = repr.split('_')
snake_case_ = {}
for value in values:
if "-" in value:
snake_case_ , snake_case_ = value.split('-')
else:
snake_case_ = re.sub('[0-9.]', '', lowerCAmelCase__)
snake_case_ = float(re.sub('[^0-9.]', '', lowerCAmelCase__))
snake_case_ = cls.NAMING_INFO['reverse_short_param'][p_k]
snake_case_ = p_v
for k in cls.DEFAULTS:
if k not in parameters:
snake_case_ = cls.DEFAULTS[k]
return parameters
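# --- Usage sketch (added; not part of the original file). The subclass and its
# defaults are illustrative assumptions.
if __name__ == "__main__":
    class RunNamer(TrialShortNamer):
        PREFIX = "run"
        DEFAULTS = {"learning_rate": 0.001, "batch_size": 8}

    # Defaults are omitted, keys are shortened to unique per-word prefixes:
    print(RunNamer.shortname({"learning_rate": 0.01, "batch_size": 8}))  # run_lr0.01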
"""simple docstring"""
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
@pytest.mark.parametrize(
'files' , [
['full:README.md', 'dataset_infos.json'],
['empty:README.md', 'dataset_infos.json'],
['dataset_infos.json'],
['full:README.md'],
] , )
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> List[str]:
snake_case_ = tmp_path_factory.mktemp('dset_infos_dir' )
if "full:README.md" in files:
with open(dataset_infos_dir / 'README.md' , 'w' ) as f:
f.write('---\ndataset_info:\n dataset_size: 42\n---' )
if "empty:README.md" in files:
with open(dataset_infos_dir / 'README.md' , 'w' ) as f:
f.write('' )
# we want to support dataset_infos.json for backward compatibility
if "dataset_infos.json" in files:
with open(dataset_infos_dir / 'dataset_infos.json' , 'w' ) as f:
f.write('{"default": {"dataset_size": 42}}' )
snake_case_ = DatasetInfosDict.from_directory(UpperCAmelCase )
assert dataset_infos
assert dataset_infos["default"].dataset_size == 42
@pytest.mark.parametrize(
'dataset_info' , [
DatasetInfo(),
DatasetInfo(
description='foo' , features=Features({'a': Value('int32' )} ) , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train'}] , download_size=42 , ),
] , )
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> Optional[Any]:
snake_case_ = str(UpperCAmelCase )
dataset_info.write_to_directory(UpperCAmelCase )
snake_case_ = DatasetInfo.from_directory(UpperCAmelCase )
assert dataset_info == reloaded
assert os.path.exists(os.path.join(UpperCAmelCase , 'dataset_info.json' ) )
def UpperCAmelCase ( ) -> Union[str, Any]:
snake_case_ = DatasetInfo(
description='foo' , citation='bar' , homepage='https://foo.bar' , license='CC0' , features=Features({'a': Value('int32' )} ) , post_processed={} , supervised_keys=() , task_templates=[] , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train', 'num_examples': 42}] , download_checksums={} , download_size=1337 , post_processing_size=442 , dataset_size=1234 , size_in_bytes=1337 + 442 + 1234 , )
snake_case_ = dataset_info._to_yaml_dict()
assert sorted(UpperCAmelCase ) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML )
for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
assert key in dataset_info_yaml_dict
assert isinstance(dataset_info_yaml_dict[key] , (list, dict, int, str) )
snake_case_ = yaml.safe_dump(UpperCAmelCase )
snake_case_ = yaml.safe_load(UpperCAmelCase )
assert dataset_info_yaml_dict == reloaded
def UpperCAmelCase ( ) -> Optional[Any]:
snake_case_ = DatasetInfo()
snake_case_ = dataset_info._to_yaml_dict()
assert dataset_info_yaml_dict == {}
@pytest.mark.parametrize(
'dataset_infos_dict' , [
DatasetInfosDict(),
DatasetInfosDict({'default': DatasetInfo()} ),
DatasetInfosDict({'my_config_name': DatasetInfo()} ),
DatasetInfosDict(
{
'default': DatasetInfo(
description='foo' , features=Features({'a': Value('int32' )} ) , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train'}] , download_size=42 , )
} ),
DatasetInfosDict(
{
'v1': DatasetInfo(dataset_size=42 ),
'v2': DatasetInfo(dataset_size=1337 ),
} ),
] , )
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> List[str]:
snake_case_ = str(UpperCAmelCase )
dataset_infos_dict.write_to_directory(UpperCAmelCase )
snake_case_ = DatasetInfosDict.from_directory(UpperCAmelCase )
# the config_name of the dataset_infos_dict take over the attribute
for config_name, dataset_info in dataset_infos_dict.items():
snake_case_ = config_name
# the yaml representation doesn't include fields like description or citation
# so we just test that we can recover what we can from the yaml
snake_case_ = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict() )
assert dataset_infos_dict == reloaded
if dataset_infos_dict:
assert os.path.exists(os.path.join(UpperCAmelCase , 'README.md' ) )
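# --- Usage note (added; not part of the original file): run with pytest from a
# datasets checkout; the path is an assumption based on the repository layout.
#
#     python -m pytest tests/test_info.py -q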
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ):
SCREAMING_SNAKE_CASE_ = StableDiffusionInpaintPipeline
SCREAMING_SNAKE_CASE_ = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
SCREAMING_SNAKE_CASE_ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
SCREAMING_SNAKE_CASE_ = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
SCREAMING_SNAKE_CASE_ = frozenset([] )
def a_ ( self) -> Any:
torch.manual_seed(0)
snake_case_ = UNetaDConditionModel(
block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=9, out_channels=4, down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D'), up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D'), cross_attention_dim=32, attention_head_dim=(2, 4), use_linear_projection=lowerCAmelCase__, )
snake_case_ = PNDMScheduler(skip_prk_steps=lowerCAmelCase__)
torch.manual_seed(0)
snake_case_ = AutoencoderKL(
block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'], up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'], latent_channels=4, sample_size=128, )
torch.manual_seed(0)
snake_case_ = CLIPTextConfig(
bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act='gelu', projection_dim=512, )
snake_case_ = CLIPTextModel(lowerCAmelCase__)
snake_case_ = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
snake_case_ = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__=0) -> List[str]:
# TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched
snake_case_ = floats_tensor((1, 3, 32, 32), rng=random.Random(lowerCAmelCase__)).to(lowerCAmelCase__)
snake_case_ = image.cpu().permute(0, 2, 3, 1)[0]
snake_case_ = Image.fromarray(np.uinta(lowerCAmelCase__)).convert('RGB').resize((64, 64))
snake_case_ = Image.fromarray(np.uinta(image + 4)).convert('RGB').resize((64, 64))
if str(lowerCAmelCase__).startswith('mps'):
snake_case_ = torch.manual_seed(lowerCAmelCase__)
else:
snake_case_ = torch.Generator(device=lowerCAmelCase__).manual_seed(lowerCAmelCase__)
snake_case_ = {
'prompt': 'A painting of a squirrel eating a burger',
'image': init_image,
'mask_image': mask_image,
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def a_ ( self) -> Dict:
snake_case_ = 'cpu' # ensure determinism for the device-dependent torch.Generator
snake_case_ = self.get_dummy_components()
snake_case_ = StableDiffusionInpaintPipeline(**lowerCAmelCase__)
snake_case_ = sd_pipe.to(lowerCAmelCase__)
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase__)
snake_case_ = self.get_dummy_inputs(lowerCAmelCase__)
snake_case_ = sd_pipe(**lowerCAmelCase__).images
snake_case_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
snake_case_ = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
def a_ ( self) -> Union[str, Any]:
super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class UpperCamelCase ( unittest.TestCase ):
def a_ ( self) -> Optional[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a_ ( self) -> Union[str, Any]:
snake_case_ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-inpaint/init_image.png')
snake_case_ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png')
snake_case_ = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'
'/yellow_cat_sitting_on_a_park_bench.npy')
snake_case_ = 'stabilityai/stable-diffusion-2-inpainting'
snake_case_ = StableDiffusionInpaintPipeline.from_pretrained(lowerCAmelCase__, safety_checker=lowerCAmelCase__)
pipe.to(lowerCAmelCase__)
pipe.set_progress_bar_config(disable=lowerCAmelCase__)
pipe.enable_attention_slicing()
snake_case_ = 'Face of a yellow cat, high resolution, sitting on a park bench'
snake_case_ = torch.manual_seed(0)
snake_case_ = pipe(
prompt=lowerCAmelCase__, image=lowerCAmelCase__, mask_image=lowerCAmelCase__, generator=lowerCAmelCase__, output_type='np', )
snake_case_ = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image).max() < 9e-3
def a_ ( self) -> Optional[int]:
snake_case_ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-inpaint/init_image.png')
snake_case_ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png')
snake_case_ = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'
'/yellow_cat_sitting_on_a_park_bench_fp16.npy')
snake_case_ = 'stabilityai/stable-diffusion-2-inpainting'
snake_case_ = StableDiffusionInpaintPipeline.from_pretrained(
lowerCAmelCase__, torch_dtype=torch.floataa, safety_checker=lowerCAmelCase__, )
pipe.to(lowerCAmelCase__)
pipe.set_progress_bar_config(disable=lowerCAmelCase__)
pipe.enable_attention_slicing()
snake_case_ = 'Face of a yellow cat, high resolution, sitting on a park bench'
snake_case_ = torch.manual_seed(0)
snake_case_ = pipe(
prompt=lowerCAmelCase__, image=lowerCAmelCase__, mask_image=lowerCAmelCase__, generator=lowerCAmelCase__, output_type='np', )
snake_case_ = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image).max() < 5e-1
def a_ ( self) -> Union[str, Any]:
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
snake_case_ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-inpaint/init_image.png')
snake_case_ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png')
snake_case_ = 'stabilityai/stable-diffusion-2-inpainting'
snake_case_ = PNDMScheduler.from_pretrained(lowerCAmelCase__, subfolder='scheduler')
snake_case_ = StableDiffusionInpaintPipeline.from_pretrained(
lowerCAmelCase__, safety_checker=lowerCAmelCase__, scheduler=lowerCAmelCase__, torch_dtype=torch.floataa, )
pipe.to(lowerCAmelCase__)
pipe.set_progress_bar_config(disable=lowerCAmelCase__)
pipe.enable_attention_slicing(1)
pipe.enable_sequential_cpu_offload()
snake_case_ = 'Face of a yellow cat, high resolution, sitting on a park bench'
snake_case_ = torch.manual_seed(0)
snake_case_ = pipe(
prompt=lowerCAmelCase__, image=lowerCAmelCase__, mask_image=lowerCAmelCase__, generator=lowerCAmelCase__, num_inference_steps=2, output_type='np', )
snake_case_ = torch.cuda.max_memory_allocated()
# make sure that less than 2.65 GB is allocated
assert mem_bytes < 2.65 * 10**9
| 312
| 0
|
"""simple docstring"""
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> float:
snake_case_ = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)
# closed-form formula for the sum of an arithmetic series
return total
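# Worked check of the closed form above (illustrative, not part of the original file):
# for first_term=1, common_diff=1 and num_of_terms=10 the series is 1 + 2 + ... + 10,
# and (10 / 2) * (2 * 1 + (10 - 1) * 1) = 5 * 11 = 55.0, matching the direct sum.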
def UpperCAmelCase ( ) -> List[Any]:
print(sum_of_series(1 , 1 , 10 ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 360
|
"""simple docstring"""
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
__UpperCamelCase = logging.getLogger(__name__)
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> Any:
return (preds == labels).mean()
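# For example (illustrative): with preds = np.array([1, 0, 1]) and labels = np.array([1, 1, 1]),
# (preds == labels).mean() evaluates to 2/3, i.e. roughly 0.667.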
@dataclass
class UpperCamelCase :
SCREAMING_SNAKE_CASE_ = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
SCREAMING_SNAKE_CASE_ = field(
default=lowerCAmelCase__ , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
SCREAMING_SNAKE_CASE_ = field(
default=lowerCAmelCase__ , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
SCREAMING_SNAKE_CASE_ = field(
default=lowerCAmelCase__ , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
@dataclass
class UpperCamelCase :
SCREAMING_SNAKE_CASE_ = field(metadata={"help": "The name of the task to train on: " + ", ".join(processors.keys() )} )
SCREAMING_SNAKE_CASE_ = field(metadata={"help": "Should contain the data files for the task."} )
SCREAMING_SNAKE_CASE_ = field(
default=1_2_8 , metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
SCREAMING_SNAKE_CASE_ = field(
default=lowerCAmelCase__ , metadata={"help": "Overwrite the cached training and evaluation sets"} )
def UpperCAmelCase ( ) -> Optional[int]:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
snake_case_ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
snake_case_ , snake_case_ , snake_case_ = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f'Output directory ({training_args.output_dir}) already exists and is not empty. Use'
' --overwrite_output_dir to overcome.' )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('Training/evaluation parameters %s' , UpperCAmelCase )
# Set seed
set_seed(training_args.seed )
try:
snake_case_ = processors[data_args.task_name]()
snake_case_ = processor.get_labels()
snake_case_ = len(UpperCAmelCase )
except KeyError:
raise ValueError('Task not found: %s' % (data_args.task_name) )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
snake_case_ = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=UpperCAmelCase , finetuning_task=data_args.task_name , cache_dir=model_args.cache_dir , )
snake_case_ = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
snake_case_ = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=UpperCAmelCase , cache_dir=model_args.cache_dir , )
# Get datasets
snake_case_ = (
MultipleChoiceDataset(
data_dir=data_args.data_dir , tokenizer=UpperCAmelCase , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
if training_args.do_train
else None
)
snake_case_ = (
MultipleChoiceDataset(
data_dir=data_args.data_dir , tokenizer=UpperCAmelCase , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
if training_args.do_eval
else None
)
def compute_metrics(UpperCAmelCase ) -> Dict:
snake_case_ = np.argmax(p.predictions , axis=1 )
return {"acc": simple_accuracy(UpperCAmelCase , p.label_ids )}
# Data collator
snake_case_ = DataCollatorWithPadding(UpperCAmelCase , pad_to_multiple_of=8 ) if training_args.fpaa else None
# Initialize our Trainer
snake_case_ = Trainer(
model=UpperCAmelCase , args=UpperCAmelCase , train_dataset=UpperCAmelCase , eval_dataset=UpperCAmelCase , compute_metrics=UpperCAmelCase , data_collator=UpperCAmelCase , )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
snake_case_ = {}
if training_args.do_eval:
logger.info('*** Evaluate ***' )
snake_case_ = trainer.evaluate()
snake_case_ = os.path.join(training_args.output_dir , 'eval_results.txt' )
if trainer.is_world_master():
with open(UpperCAmelCase , 'w' ) as writer:
logger.info('***** Eval results *****' )
for key, value in result.items():
logger.info(' %s = %s' , UpperCAmelCase , UpperCAmelCase )
writer.write('%s = %s\n' % (key, value) )
results.update(UpperCAmelCase )
return results
def UpperCAmelCase ( UpperCAmelCase ) -> Optional[int]:
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 312
| 0
|
"""simple docstring"""
import unittest
from datasets import load_dataset
from transformers import BloomTokenizerFast
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCamelCase ( lowerCAmelCase__ , unittest.TestCase ):
SCREAMING_SNAKE_CASE_ = None
SCREAMING_SNAKE_CASE_ = BloomTokenizerFast
SCREAMING_SNAKE_CASE_ = BloomTokenizerFast
SCREAMING_SNAKE_CASE_ = True
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = "tokenizer_file"
SCREAMING_SNAKE_CASE_ = {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "pad_token": "<pad>"}
def a_ ( self) -> str:
super().setUp()
snake_case_ = BloomTokenizerFast.from_pretrained('bigscience/tokenizer')
tokenizer.save_pretrained(self.tmpdirname)
def a_ ( self, **lowerCAmelCase__) -> Any:
kwargs.update(self.special_tokens_map)
return BloomTokenizerFast.from_pretrained(self.tmpdirname, **lowerCAmelCase__)
def a_ ( self) -> Union[str, Any]:
snake_case_ = self.get_rust_tokenizer()
snake_case_ = ['The quick brown fox</s>', 'jumps over the lazy dog</s>']
snake_case_ = [[2175, 2_3714, 7_3173, 14_4252, 2], [77, 13_2619, 3478, 368, 10_9586, 3_5433, 2]]
snake_case_ = tokenizer.batch_encode_plus(lowerCAmelCase__)['input_ids']
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = tokenizer.batch_decode(lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
def a_ ( self, lowerCAmelCase__=6) -> Any:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})'):
snake_case_ = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase__, **lowerCAmelCase__)
# tokenizer_r.pad_token = None # Hotfixing padding = None
# Simple input
snake_case_ = 'This is a simple input'
snake_case_ = ['This is a simple input 1', 'This is a simple input 2']
snake_case_ = ('This is a simple input', 'This is a pair')
snake_case_ = [
('This is a simple input 1', 'This is a simple input 2'),
('This is a simple pair 1', 'This is a simple pair 2'),
]
# Simple input tests
try:
tokenizer_r.encode(lowerCAmelCase__, max_length=lowerCAmelCase__)
tokenizer_r.encode_plus(lowerCAmelCase__, max_length=lowerCAmelCase__)
tokenizer_r.batch_encode_plus(lowerCAmelCase__, max_length=lowerCAmelCase__)
tokenizer_r.encode(lowerCAmelCase__, max_length=lowerCAmelCase__)
tokenizer_r.batch_encode_plus(lowerCAmelCase__, max_length=lowerCAmelCase__)
except ValueError:
self.fail('Bloom Tokenizer should be able to deal with padding')
snake_case_ = None # Hotfixing padding = None
self.assertRaises(lowerCAmelCase__, tokenizer_r.encode, lowerCAmelCase__, max_length=lowerCAmelCase__, padding='max_length')
# Simple input
self.assertRaises(lowerCAmelCase__, tokenizer_r.encode_plus, lowerCAmelCase__, max_length=lowerCAmelCase__, padding='max_length')
# Simple input
self.assertRaises(
lowerCAmelCase__, tokenizer_r.batch_encode_plus, lowerCAmelCase__, max_length=lowerCAmelCase__, padding='max_length', )
# Pair input
self.assertRaises(lowerCAmelCase__, tokenizer_r.encode, lowerCAmelCase__, max_length=lowerCAmelCase__, padding='max_length')
# Pair input
self.assertRaises(lowerCAmelCase__, tokenizer_r.encode_plus, lowerCAmelCase__, max_length=lowerCAmelCase__, padding='max_length')
# Pair input
self.assertRaises(
lowerCAmelCase__, tokenizer_r.batch_encode_plus, lowerCAmelCase__, max_length=lowerCAmelCase__, padding='max_length', )
def a_ ( self) -> Dict:
snake_case_ = self.get_rust_tokenizer()
snake_case_ = load_dataset('xnli', 'all_languages', split='test', streaming=lowerCAmelCase__)
snake_case_ = next(iter(lowerCAmelCase__))['premise'] # pick up one data
snake_case_ = list(sample_data.values())
snake_case_ = list(map(tokenizer.encode, lowerCAmelCase__))
snake_case_ = [tokenizer.decode(lowerCAmelCase__, clean_up_tokenization_spaces=lowerCAmelCase__) for x in output_tokens]
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
def a_ ( self) -> Optional[Any]:
# The test has to be overridden because BLOOM uses ALiBi positional embeddings, which do not
# impose any sequence length constraint. The parent class's test would fail since it relies on the
# maximum sequence length of the positional embeddings.
self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map), 1)
self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values())[0]), 1)
| 361
|
"""simple docstring"""
from __future__ import annotations
import math
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> int:
if depth < 0:
raise ValueError('Depth cannot be less than 0' )
if len(UpperCAmelCase ) == 0:
raise ValueError('Scores cannot be empty' )
if depth == height:
return scores[node_index]
if is_max:
return max(
minimax(depth + 1 , node_index * 2 , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) , minimax(depth + 1 , node_index * 2 + 1 , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) , )
return min(
minimax(depth + 1 , node_index * 2 , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) , minimax(depth + 1 , node_index * 2 + 1 , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) , )
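# Worked example (illustrative): for the leaves [90, 23, 6, 33, 21, 65, 123, 34423] used by the
# driver below (height = log2(8) = 3), the depth-2 maximizer yields 90, 33, 65 and 34423, the
# depth-1 minimizer reduces these to 33 and 65, and the maximizing root picks max(33, 65) = 65,
# so the program prints 'Optimal value : 65'.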
def UpperCAmelCase ( ) -> None:
snake_case_ = [90, 23, 6, 33, 21, 65, 123, 34423]
snake_case_ = math.log(len(UpperCAmelCase ) , 2 )
print('Optimal value : ' , end='' )
print(minimax(0 , 0 , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 312
| 0
|
import argparse
import torch
from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert
from transformers.utils import logging
logging.set_verbosity_info()
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Union[str, Any]:
# Initialise PyTorch model
snake_case_ = LxmertConfig.from_json_file(UpperCAmelCase )
print(f'Building PyTorch model from configuration: {config}' )
snake_case_ = LxmertForPreTraining(UpperCAmelCase )
# Load weights from tf checkpoint
load_tf_weights_in_lxmert(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
# Save pytorch-model
print(f'Save PyTorch model to {pytorch_dump_path}' )
torch.save(model.state_dict() , UpperCAmelCase )
if __name__ == "__main__":
__UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
__UpperCamelCase = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
| 362
|
"""simple docstring"""
import argparse
import json
import torch
from diffusers import DDPMScheduler, LDMPipeline, UNetaDModel, VQModel
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase=1 ) -> Optional[Any]:
if n_shave_prefix_segments >= 0:
return ".".join(path.split('.' )[n_shave_prefix_segments:] )
else:
return ".".join(path.split('.' )[:n_shave_prefix_segments] )
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase=0 ) -> Dict:
snake_case_ = []
for old_item in old_list:
snake_case_ = old_item.replace('in_layers.0' , 'norm1' )
snake_case_ = new_item.replace('in_layers.2' , 'conv1' )
snake_case_ = new_item.replace('out_layers.0' , 'norm2' )
snake_case_ = new_item.replace('out_layers.3' , 'conv2' )
snake_case_ = new_item.replace('emb_layers.1' , 'time_emb_proj' )
snake_case_ = new_item.replace('skip_connection' , 'conv_shortcut' )
snake_case_ = shave_segments(UpperCAmelCase , n_shave_prefix_segments=UpperCAmelCase )
mapping.append({'old': old_item, 'new': new_item} )
return mapping
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase=0 ) -> Union[str, Any]:
snake_case_ = []
for old_item in old_list:
snake_case_ = old_item
snake_case_ = new_item.replace('norm.weight' , 'group_norm.weight' )
snake_case_ = new_item.replace('norm.bias' , 'group_norm.bias' )
snake_case_ = new_item.replace('proj_out.weight' , 'proj_attn.weight' )
snake_case_ = new_item.replace('proj_out.bias' , 'proj_attn.bias' )
snake_case_ = shave_segments(UpperCAmelCase , n_shave_prefix_segments=UpperCAmelCase )
mapping.append({'old': old_item, 'new': new_item} )
return mapping
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=None ) -> Optional[Any]:
assert isinstance(UpperCAmelCase , UpperCAmelCase ), "Paths should be a list of dicts containing 'old' and 'new' keys."
# Splits the attention layers into three variables.
if attention_paths_to_split is not None:
for path, path_map in attention_paths_to_split.items():
snake_case_ = old_checkpoint[path]
snake_case_ = old_tensor.shape[0] // 3
snake_case_ = (-1, channels) if len(old_tensor.shape ) == 3 else (-1)
snake_case_ = old_tensor.shape[0] // config['num_head_channels'] // 3
snake_case_ = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:] )
snake_case_ , snake_case_ , snake_case_ = old_tensor.split(channels // num_heads , dim=1 )
snake_case_ = query.reshape(UpperCAmelCase )
snake_case_ = key.reshape(UpperCAmelCase )
snake_case_ = value.reshape(UpperCAmelCase )
for path in paths:
snake_case_ = path['new']
# These have already been assigned
if attention_paths_to_split is not None and new_path in attention_paths_to_split:
continue
# Global renaming happens here
snake_case_ = new_path.replace('middle_block.0' , 'mid_block.resnets.0' )
snake_case_ = new_path.replace('middle_block.1' , 'mid_block.attentions.0' )
snake_case_ = new_path.replace('middle_block.2' , 'mid_block.resnets.1' )
if additional_replacements is not None:
for replacement in additional_replacements:
snake_case_ = new_path.replace(replacement['old'] , replacement['new'] )
# proj_attn.weight has to be converted from conv 1D to linear
if "proj_attn.weight" in new_path:
snake_case_ = old_checkpoint[path['old']][:, :, 0]
else:
snake_case_ = old_checkpoint[path['old']]
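# Shape sketch (illustrative): a fused attention tensor with 3*C output channels is reshaped
# above to (num_heads, 3*C/num_heads, ...), split along dim=1 into three chunks of C/num_heads
# channels each (conventionally query, key and value), and each chunk is reshaped to the target
# shape ((-1, C) for 3-D weights, flat for biases) before being written to the new checkpoint.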
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> List[Any]:
snake_case_ = {}
snake_case_ = checkpoint['time_embed.0.weight']
snake_case_ = checkpoint['time_embed.0.bias']
snake_case_ = checkpoint['time_embed.2.weight']
snake_case_ = checkpoint['time_embed.2.bias']
snake_case_ = checkpoint['input_blocks.0.0.weight']
snake_case_ = checkpoint['input_blocks.0.0.bias']
snake_case_ = checkpoint['out.0.weight']
snake_case_ = checkpoint['out.0.bias']
snake_case_ = checkpoint['out.2.weight']
snake_case_ = checkpoint['out.2.bias']
# Retrieves the keys for the input blocks only
snake_case_ = len({'.'.join(layer.split('.' )[:2] ) for layer in checkpoint if 'input_blocks' in layer} )
snake_case_ = {
layer_id: [key for key in checkpoint if f'input_blocks.{layer_id}' in key]
for layer_id in range(UpperCAmelCase )
}
# Retrieves the keys for the middle blocks only
snake_case_ = len({'.'.join(layer.split('.' )[:2] ) for layer in checkpoint if 'middle_block' in layer} )
snake_case_ = {
layer_id: [key for key in checkpoint if f'middle_block.{layer_id}' in key]
for layer_id in range(UpperCAmelCase )
}
# Retrieves the keys for the output blocks only
snake_case_ = len({'.'.join(layer.split('.' )[:2] ) for layer in checkpoint if 'output_blocks' in layer} )
snake_case_ = {
layer_id: [key for key in checkpoint if f'output_blocks.{layer_id}' in key]
for layer_id in range(UpperCAmelCase )
}
for i in range(1 , UpperCAmelCase ):
snake_case_ = (i - 1) // (config['num_res_blocks'] + 1)
snake_case_ = (i - 1) % (config['num_res_blocks'] + 1)
snake_case_ = [key for key in input_blocks[i] if f'input_blocks.{i}.0' in key]
snake_case_ = [key for key in input_blocks[i] if f'input_blocks.{i}.1' in key]
if f'input_blocks.{i}.0.op.weight' in checkpoint:
snake_case_ = checkpoint[
f'input_blocks.{i}.0.op.weight'
]
snake_case_ = checkpoint[
f'input_blocks.{i}.0.op.bias'
]
continue
snake_case_ = renew_resnet_paths(UpperCAmelCase )
snake_case_ = {'old': f'input_blocks.{i}.0', 'new': f'down_blocks.{block_id}.resnets.{layer_in_block_id}'}
snake_case_ = {'old': 'resnets.2.op', 'new': 'downsamplers.0.op'}
assign_to_checkpoint(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , additional_replacements=[meta_path, resnet_op] , config=UpperCAmelCase )
if len(UpperCAmelCase ):
snake_case_ = renew_attention_paths(UpperCAmelCase )
snake_case_ = {
'old': f'input_blocks.{i}.1',
'new': f'down_blocks.{block_id}.attentions.{layer_in_block_id}',
}
snake_case_ = {
f'input_blocks.{i}.1.qkv.bias': {
'key': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias',
'query': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias',
'value': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias',
},
f'input_blocks.{i}.1.qkv.weight': {
'key': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight',
'query': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight',
'value': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight',
},
}
assign_to_checkpoint(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , additional_replacements=[meta_path] , attention_paths_to_split=UpperCAmelCase , config=UpperCAmelCase , )
snake_case_ = middle_blocks[0]
snake_case_ = middle_blocks[1]
snake_case_ = middle_blocks[2]
snake_case_ = renew_resnet_paths(UpperCAmelCase )
assign_to_checkpoint(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , config=UpperCAmelCase )
snake_case_ = renew_resnet_paths(UpperCAmelCase )
assign_to_checkpoint(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , config=UpperCAmelCase )
snake_case_ = renew_attention_paths(UpperCAmelCase )
snake_case_ = {
'middle_block.1.qkv.bias': {
'key': 'mid_block.attentions.0.key.bias',
'query': 'mid_block.attentions.0.query.bias',
'value': 'mid_block.attentions.0.value.bias',
},
'middle_block.1.qkv.weight': {
'key': 'mid_block.attentions.0.key.weight',
'query': 'mid_block.attentions.0.query.weight',
'value': 'mid_block.attentions.0.value.weight',
},
}
assign_to_checkpoint(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , attention_paths_to_split=UpperCAmelCase , config=UpperCAmelCase )
for i in range(UpperCAmelCase ):
snake_case_ = i // (config['num_res_blocks'] + 1)
snake_case_ = i % (config['num_res_blocks'] + 1)
snake_case_ = [shave_segments(UpperCAmelCase , 2 ) for name in output_blocks[i]]
snake_case_ = {}
for layer in output_block_layers:
snake_case_ , snake_case_ = layer.split('.' )[0], shave_segments(UpperCAmelCase , 1 )
if layer_id in output_block_list:
output_block_list[layer_id].append(UpperCAmelCase )
else:
snake_case_ = [layer_name]
if len(UpperCAmelCase ) > 1:
snake_case_ = [key for key in output_blocks[i] if f'output_blocks.{i}.0' in key]
snake_case_ = [key for key in output_blocks[i] if f'output_blocks.{i}.1' in key]
snake_case_ = renew_resnet_paths(UpperCAmelCase )
snake_case_ = renew_resnet_paths(UpperCAmelCase )
snake_case_ = {'old': f'output_blocks.{i}.0', 'new': f'up_blocks.{block_id}.resnets.{layer_in_block_id}'}
assign_to_checkpoint(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , additional_replacements=[meta_path] , config=UpperCAmelCase )
if ["conv.weight", "conv.bias"] in output_block_list.values():
snake_case_ = list(output_block_list.values() ).index(['conv.weight', 'conv.bias'] )
snake_case_ = checkpoint[
f'output_blocks.{i}.{index}.conv.weight'
]
snake_case_ = checkpoint[
f'output_blocks.{i}.{index}.conv.bias'
]
# Clear attentions as they have been attributed above.
if len(UpperCAmelCase ) == 2:
snake_case_ = []
if len(UpperCAmelCase ):
snake_case_ = renew_attention_paths(UpperCAmelCase )
snake_case_ = {
'old': f'output_blocks.{i}.1',
'new': f'up_blocks.{block_id}.attentions.{layer_in_block_id}',
}
snake_case_ = {
f'output_blocks.{i}.1.qkv.bias': {
'key': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias',
'query': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias',
'value': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias',
},
f'output_blocks.{i}.1.qkv.weight': {
'key': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight',
'query': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight',
'value': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight',
},
}
assign_to_checkpoint(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , additional_replacements=[meta_path] , attention_paths_to_split=to_split if any('qkv' in key for key in attentions ) else None , config=UpperCAmelCase , )
else:
snake_case_ = renew_resnet_paths(UpperCAmelCase , n_shave_prefix_segments=1 )
for path in resnet_0_paths:
snake_case_ = '.'.join(['output_blocks', str(UpperCAmelCase ), path['old']] )
snake_case_ = '.'.join(['up_blocks', str(UpperCAmelCase ), 'resnets', str(UpperCAmelCase ), path['new']] )
snake_case_ = checkpoint[old_path]
return new_checkpoint
if __name__ == "__main__":
__UpperCamelCase = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the architecture.''',
)
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
__UpperCamelCase = parser.parse_args()
__UpperCamelCase = torch.load(args.checkpoint_path)
with open(args.config_file) as f:
__UpperCamelCase = json.loads(f.read())
__UpperCamelCase = convert_ldm_checkpoint(checkpoint, config)
if "ldm" in config:
del config["ldm"]
__UpperCamelCase = UNetaDModel(**config)
model.load_state_dict(converted_checkpoint)
try:
__UpperCamelCase = DDPMScheduler.from_config('''/'''.join(args.checkpoint_path.split('''/''')[:-1]))
__UpperCamelCase = VQModel.from_pretrained('''/'''.join(args.checkpoint_path.split('''/''')[:-1]))
__UpperCamelCase = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae)
pipe.save_pretrained(args.dump_path)
except: # noqa: E722
model.save_pretrained(args.dump_path)
| 312
| 0
|
from collections.abc import Sequence
def UpperCAmelCase ( UpperCAmelCase = None ) -> int:
if nums is None or not nums:
raise ValueError('Input sequence should not be empty' )
snake_case_ = nums[0]
for i in range(1 , len(UpperCAmelCase ) ):
snake_case_ = nums[i]
snake_case_ = max(UpperCAmelCase , ans + num , UpperCAmelCase )
return ans
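# Note (illustrative): this is a Kadane-style scan. In the standard formulation the update is
# best_ending_here = max(num, best_ending_here + num), with a running maximum kept alongside;
# e.g. for [-2, 1, -3, 4, -1, 2, 1, -5, 4] the maximum subarray sum is 4 - 1 + 2 + 1 = 6.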
if __name__ == "__main__":
import doctest
doctest.testmod()
# Try on a sample input from the user
__UpperCamelCase = int(input('''Enter number of elements : ''').strip())
__UpperCamelCase = list(map(int, input('''\nEnter the numbers : ''').strip().split()))[:n]
print(max_subsequence_sum(array))
| 363
|
"""simple docstring"""
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
def UpperCAmelCase ( UpperCAmelCase ) -> Dict:
# vision encoder
if "img_encoder.pos_embed" in name:
snake_case_ = name.replace('img_encoder.pos_embed' , 'vision_model.embeddings.position_embeddings' )
if "img_encoder.patch_embed.proj" in name:
snake_case_ = name.replace('img_encoder.patch_embed.proj' , 'vision_model.embeddings.patch_embeddings.projection' )
if "img_encoder.patch_embed.norm" in name:
snake_case_ = name.replace('img_encoder.patch_embed.norm' , 'vision_model.embeddings.layernorm' )
if "img_encoder.layers" in name:
snake_case_ = name.replace('img_encoder.layers' , 'vision_model.encoder.stages' )
if "blocks" in name and "res" not in name:
snake_case_ = name.replace('blocks' , 'layers' )
if "attn" in name and "pre_assign" not in name:
snake_case_ = name.replace('attn' , 'self_attn' )
if "proj" in name and "self_attn" in name and "text" not in name:
snake_case_ = name.replace('proj' , 'out_proj' )
if "pre_assign_attn.attn.proj" in name:
snake_case_ = name.replace('pre_assign_attn.attn.proj' , 'pre_assign_attn.attn.out_proj' )
if "norm1" in name:
snake_case_ = name.replace('norm1' , 'layer_norm1' )
if "norm2" in name and "pre_assign" not in name:
snake_case_ = name.replace('norm2' , 'layer_norm2' )
if "img_encoder.norm" in name:
snake_case_ = name.replace('img_encoder.norm' , 'vision_model.layernorm' )
# text encoder
if "text_encoder.token_embedding" in name:
snake_case_ = name.replace('text_encoder.token_embedding' , 'text_model.embeddings.token_embedding' )
if "text_encoder.positional_embedding" in name:
snake_case_ = name.replace('text_encoder.positional_embedding' , 'text_model.embeddings.position_embedding.weight' )
if "text_encoder.transformer.resblocks." in name:
snake_case_ = name.replace('text_encoder.transformer.resblocks.' , 'text_model.encoder.layers.' )
if "ln_1" in name:
snake_case_ = name.replace('ln_1' , 'layer_norm1' )
if "ln_2" in name:
snake_case_ = name.replace('ln_2' , 'layer_norm2' )
if "c_fc" in name:
snake_case_ = name.replace('c_fc' , 'fc1' )
if "c_proj" in name:
snake_case_ = name.replace('c_proj' , 'fc2' )
if "text_encoder" in name:
snake_case_ = name.replace('text_encoder' , 'text_model' )
if "ln_final" in name:
snake_case_ = name.replace('ln_final' , 'final_layer_norm' )
# projection layers
if "img_projector.linear_hidden." in name:
snake_case_ = name.replace('img_projector.linear_hidden.' , 'visual_projection.' )
if "img_projector.linear_out." in name:
snake_case_ = name.replace('img_projector.linear_out.' , 'visual_projection.3.' )
if "text_projector.linear_hidden" in name:
snake_case_ = name.replace('text_projector.linear_hidden' , 'text_projection' )
if "text_projector.linear_out" in name:
snake_case_ = name.replace('text_projector.linear_out' , 'text_projection.3' )
return name
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> Optional[Any]:
for key in orig_state_dict.copy().keys():
snake_case_ = orig_state_dict.pop(UpperCAmelCase )
if "qkv" in key:
# weights and biases of the key, value and query projections of the vision encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
snake_case_ = key.split('.' )
snake_case_ , snake_case_ = int(key_split[2] ), int(key_split[4] )
snake_case_ = config.vision_config.hidden_size
if "weight" in key:
snake_case_ = val[:dim, :]
snake_case_ = val[dim : dim * 2, :]
snake_case_ = val[-dim:, :]
else:
snake_case_ = val[:dim]
snake_case_ = val[dim : dim * 2]
snake_case_ = val[-dim:]
elif "in_proj" in key:
# weights and biases of the key, value and query projections of the text encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
snake_case_ = key.split('.' )
snake_case_ = int(key_split[3] )
snake_case_ = config.text_config.hidden_size
if "weight" in key:
snake_case_ = val[:dim, :]
snake_case_ = val[
dim : dim * 2, :
]
snake_case_ = val[-dim:, :]
else:
snake_case_ = val[:dim]
snake_case_ = val[dim : dim * 2]
snake_case_ = val[-dim:]
else:
snake_case_ = rename_key(UpperCAmelCase )
# squeeze if necessary
if (
"text_projection.0" in new_name
or "text_projection.3" in new_name
or "visual_projection.0" in new_name
or "visual_projection.3" in new_name
):
snake_case_ = val.squeeze_()
else:
snake_case_ = val
return orig_state_dict
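# Shape sketch (illustrative): for hidden size d, a fused 'qkv.weight' of shape (3*d, d) is split
# row-wise above into val[:d, :], val[d : 2*d, :] and val[-d:, :] (conventionally the query, key
# and value projections), and the matching (3*d,)-shaped bias is sliced the same way.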
def UpperCAmelCase ( ) -> Any:
snake_case_ = 'http://images.cocodataset.org/val2017/000000039769.jpg'
snake_case_ = Image.open(requests.get(UpperCAmelCase , stream=UpperCAmelCase ).raw )
return im
@torch.no_grad()
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase="groupvit-gcc-yfcc" , UpperCAmelCase=False ) -> int:
snake_case_ = GroupViTConfig()
snake_case_ = GroupViTModel(UpperCAmelCase ).eval()
snake_case_ = torch.load(UpperCAmelCase , map_location='cpu' )['model']
snake_case_ = convert_state_dict(UpperCAmelCase , UpperCAmelCase )
snake_case_ , snake_case_ = model.load_state_dict(UpperCAmelCase , strict=UpperCAmelCase )
assert missing_keys == ["text_model.embeddings.position_ids"]
assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(UpperCAmelCase ) == 0)
# verify result
snake_case_ = CLIPProcessor.from_pretrained('openai/clip-vit-base-patch32' )
snake_case_ = prepare_img()
snake_case_ = processor(text=['a photo of a cat', 'a photo of a dog'] , images=UpperCAmelCase , padding=UpperCAmelCase , return_tensors='pt' )
with torch.no_grad():
snake_case_ = model(**UpperCAmelCase )
if model_name == "groupvit-gcc-yfcc":
snake_case_ = torch.tensor([[13.3_523, 6.3_629]] )
elif model_name == "groupvit-gcc-redcaps":
snake_case_ = torch.tensor([[16.1_873, 8.6_230]] )
else:
raise ValueError(f'Model name {model_name} not supported.' )
assert torch.allclose(outputs.logits_per_image , UpperCAmelCase , atol=1e-3 )
processor.save_pretrained(UpperCAmelCase )
model.save_pretrained(UpperCAmelCase )
print('Successfully saved processor and model to' , UpperCAmelCase )
if push_to_hub:
print('Pushing to the hub...' )
processor.push_to_hub(UpperCAmelCase , organization='nielsr' )
model.push_to_hub(UpperCAmelCase , organization='nielsr' )
if __name__ == "__main__":
__UpperCamelCase = argparse.ArgumentParser()
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to dump the processor and PyTorch model.'''
)
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to GroupViT checkpoint''')
parser.add_argument(
'''--model_name''',
default='''groupvit-gcc-yfcc''',
type=str,
help='''Name of the model. Expecting either \'groupvit-gcc-yfcc\' or \'groupvit-gcc-redcaps\'''',
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.''',
)
__UpperCamelCase = parser.parse_args()
convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 312
| 0
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_fnet import FNetTokenizer
else:
__UpperCamelCase = None
__UpperCamelCase = logging.get_logger(__name__)
__UpperCamelCase = {'''vocab_file''': '''spiece.model''', '''tokenizer_file''': '''tokenizer.json'''}
__UpperCamelCase = {
'''vocab_file''': {
'''google/fnet-base''': '''https://huggingface.co/google/fnet-base/resolve/main/spiece.model''',
'''google/fnet-large''': '''https://huggingface.co/google/fnet-large/resolve/main/spiece.model''',
},
'''tokenizer_file''': {
'''google/fnet-base''': '''https://huggingface.co/google/fnet-base/resolve/main/tokenizer.json''',
'''google/fnet-large''': '''https://huggingface.co/google/fnet-large/resolve/main/tokenizer.json''',
},
}
__UpperCamelCase = {
'''google/fnet-base''': 512,
'''google/fnet-large''': 512,
}
__UpperCamelCase = '''▁'''
class UpperCamelCase ( lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE_ = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE_ = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE_ = ["input_ids", "token_type_ids"]
SCREAMING_SNAKE_CASE_ = FNetTokenizer
def __init__( self, lowerCAmelCase__=None, lowerCAmelCase__=None, lowerCAmelCase__=False, lowerCAmelCase__=True, lowerCAmelCase__=True, lowerCAmelCase__="<unk>", lowerCAmelCase__="[SEP]", lowerCAmelCase__="<pad>", lowerCAmelCase__="[CLS]", lowerCAmelCase__="[MASK]", **lowerCAmelCase__, ) -> int:
# The mask token behaves like a normal word, i.e. it includes the space before it and
# is included in the raw text, so there should be a match in a non-normalized sentence.
snake_case_ = (
AddedToken(lowerCAmelCase__, lstrip=lowerCAmelCase__, rstrip=lowerCAmelCase__, normalized=lowerCAmelCase__)
if isinstance(lowerCAmelCase__, lowerCAmelCase__)
else mask_token
)
super().__init__(
lowerCAmelCase__, tokenizer_file=lowerCAmelCase__, do_lower_case=lowerCAmelCase__, remove_space=lowerCAmelCase__, keep_accents=lowerCAmelCase__, unk_token=lowerCAmelCase__, sep_token=lowerCAmelCase__, pad_token=lowerCAmelCase__, cls_token=lowerCAmelCase__, mask_token=lowerCAmelCase__, **lowerCAmelCase__, )
snake_case_ = do_lower_case
snake_case_ = remove_space
snake_case_ = keep_accents
snake_case_ = vocab_file
snake_case_ = False if not self.vocab_file else True
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None) -> List[int]:
snake_case_ = [self.sep_token_id]
snake_case_ = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None) -> List[int]:
snake_case_ = [self.sep_token_id]
snake_case_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1]
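# Illustrative layout (not part of the original file): for a single sequence the two methods
# above produce [CLS] A [SEP] with token_type_ids of all 0s; for a pair they produce
# [CLS] A [SEP] B [SEP], with 0s covering [CLS] A [SEP] and 1s covering B [SEP].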
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None) -> Tuple[str]:
if not os.path.isdir(lowerCAmelCase__):
logger.error(f'Vocabulary path ({save_directory}) should be a directory')
return
snake_case_ = os.path.join(
lowerCAmelCase__, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
if os.path.abspath(self.vocab_file) != os.path.abspath(lowerCAmelCase__):
copyfile(self.vocab_file, lowerCAmelCase__)
return (out_vocab_file,)
| 364
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__UpperCamelCase = {'''configuration_mmbt''': ['''MMBTConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase = ['''MMBTForClassification''', '''MMBTModel''', '''ModalEmbeddings''']
if TYPE_CHECKING:
from .configuration_mmbt import MMBTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings
else:
import sys
__UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 312
| 0
|
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class UpperCamelCase ( unittest.TestCase ):
@property
def a_ ( self) -> str:
torch.manual_seed(0)
snake_case_ = UNetaDModel(
block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=3, out_channels=3, down_block_types=('DownBlock2D', 'AttnDownBlock2D'), up_block_types=('AttnUpBlock2D', 'UpBlock2D'), )
return model
def a_ ( self) -> Any:
snake_case_ = self.dummy_uncond_unet
snake_case_ = KarrasVeScheduler()
snake_case_ = KarrasVePipeline(unet=lowerCAmelCase__, scheduler=lowerCAmelCase__)
pipe.to(lowerCAmelCase__)
pipe.set_progress_bar_config(disable=lowerCAmelCase__)
snake_case_ = torch.manual_seed(0)
snake_case_ = pipe(num_inference_steps=2, generator=lowerCAmelCase__, output_type='numpy').images
snake_case_ = torch.manual_seed(0)
snake_case_ = pipe(num_inference_steps=2, generator=lowerCAmelCase__, output_type='numpy', return_dict=lowerCAmelCase__)[0]
snake_case_ = image[0, -3:, -3:, -1]
snake_case_ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
snake_case_ = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch
class UpperCamelCase ( unittest.TestCase ):
def a_ ( self) -> Any:
snake_case_ = 'google/ncsnpp-celebahq-256'
snake_case_ = UNetaDModel.from_pretrained(lowerCAmelCase__)
snake_case_ = KarrasVeScheduler()
snake_case_ = KarrasVePipeline(unet=lowerCAmelCase__, scheduler=lowerCAmelCase__)
pipe.to(lowerCAmelCase__)
pipe.set_progress_bar_config(disable=lowerCAmelCase__)
snake_case_ = torch.manual_seed(0)
snake_case_ = pipe(num_inference_steps=20, generator=lowerCAmelCase__, output_type='numpy').images
snake_case_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
snake_case_ = np.array([0.578, 0.5811, 0.5924, 0.5809, 0.587, 0.5886, 0.5861, 0.5802, 0.586])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 365
|
"""simple docstring"""
from __future__ import annotations
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> list[str]:
if partitions <= 0:
raise ValueError('partitions must be a positive number!' )
if partitions > number_of_bytes:
raise ValueError('partitions can not > number_of_bytes!' )
snake_case_ = number_of_bytes // partitions
snake_case_ = []
for i in range(UpperCAmelCase ):
snake_case_ = i * bytes_per_partition + 1
snake_case_ = (
number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition
)
allocation_list.append(f'{start_bytes}-{end_bytes}' )
return allocation_list
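# Worked example (illustrative): for 16647 bytes split into 4 partitions,
# bytes_per_partition = 16647 // 4 = 4161, so the returned list is
# ['1-4161', '4162-8322', '8323-12483', '12484-16647']; the last partition absorbs the remainder.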
if __name__ == "__main__":
import doctest
doctest.testmod()
| 312
| 0
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__UpperCamelCase = logging.get_logger(__name__)
__UpperCamelCase = {
'''facebook/deit-base-distilled-patch16-224''': (
'''https://huggingface.co/facebook/deit-base-patch16-224/resolve/main/config.json'''
),
# See all DeiT models at https://huggingface.co/models?filter=deit
}
class UpperCamelCase ( lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE_ = "deit"
def __init__( self, lowerCAmelCase__=768, lowerCAmelCase__=12, lowerCAmelCase__=12, lowerCAmelCase__=3072, lowerCAmelCase__="gelu", lowerCAmelCase__=0.0, lowerCAmelCase__=0.0, lowerCAmelCase__=0.02, lowerCAmelCase__=1e-12, lowerCAmelCase__=224, lowerCAmelCase__=16, lowerCAmelCase__=3, lowerCAmelCase__=True, lowerCAmelCase__=16, **lowerCAmelCase__, ) -> Dict:
super().__init__(**lowerCAmelCase__)
snake_case_ = hidden_size
snake_case_ = num_hidden_layers
snake_case_ = num_attention_heads
snake_case_ = intermediate_size
snake_case_ = hidden_act
snake_case_ = hidden_dropout_prob
snake_case_ = attention_probs_dropout_prob
snake_case_ = initializer_range
snake_case_ = layer_norm_eps
snake_case_ = image_size
snake_case_ = patch_size
snake_case_ = num_channels
snake_case_ = qkv_bias
snake_case_ = encoder_stride
class UpperCamelCase ( lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE_ = version.parse("1.11" )
@property
def a_ ( self) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
])
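# In other words (illustrative): the ONNX export treats all four axes of `pixel_values`
# (batch, num_channels, height, width) as dynamic, so batch size and image resolution
# may vary at inference time.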
@property
def a_ ( self) -> float:
return 1e-4
| 366
|
"""simple docstring"""
__UpperCamelCase = 256
# Modulus to hash a string
__UpperCamelCase = 100_0003
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> bool:
snake_case_ = len(UpperCAmelCase )
snake_case_ = len(UpperCAmelCase )
if p_len > t_len:
return False
snake_case_ = 0
snake_case_ = 0
snake_case_ = 1
# Calculating the hash of pattern and substring of text
for i in range(UpperCAmelCase ):
snake_case_ = (ord(pattern[i] ) + p_hash * alphabet_size) % modulus
snake_case_ = (ord(text[i] ) + text_hash * alphabet_size) % modulus
if i == p_len - 1:
continue
snake_case_ = (modulus_power * alphabet_size) % modulus
for i in range(0 , t_len - p_len + 1 ):
if text_hash == p_hash and text[i : i + p_len] == pattern:
return True
if i == t_len - p_len:
continue
# Calculate the rolling hash (https://en.wikipedia.org/wiki/Rolling_hash)
snake_case_ = (
(text_hash - ord(text[i] ) * modulus_power) * alphabet_size
+ ord(text[i + p_len] )
) % modulus
return False
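# Rolling-hash sketch (illustrative): with alphabet_size a and modulus M, the hash of
# text[i+1 : i+1+p_len] is derived from the hash of text[i : i+p_len] by subtracting the leading
# character's contribution ord(text[i]) * modulus_power (where modulus_power = a**(p_len-1) % M),
# multiplying by a, adding ord(text[i + p_len]), and reducing mod M; that is exactly the update
# performed in the loop above.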
def UpperCAmelCase ( ) -> None:
# Test 1)
snake_case_ = 'abc1abc12'
snake_case_ = 'alskfjaldsabc1abc1abc12k23adsfabcabc'
snake_case_ = 'alskfjaldsk23adsfabcabc'
assert rabin_karp(UpperCAmelCase , UpperCAmelCase ) and not rabin_karp(UpperCAmelCase , UpperCAmelCase )
# Test 2)
snake_case_ = 'ABABX'
snake_case_ = 'ABABZABABYABABX'
assert rabin_karp(UpperCAmelCase , UpperCAmelCase )
# Test 3)
snake_case_ = 'AAAB'
snake_case_ = 'ABAAAAAB'
assert rabin_karp(UpperCAmelCase , UpperCAmelCase )
# Test 4)
snake_case_ = 'abcdabcy'
snake_case_ = 'abcxabcdabxabcdabcdabcy'
assert rabin_karp(UpperCAmelCase , UpperCAmelCase )
# Test 5)
snake_case_ = 'Lü'
snake_case_ = 'Lüsai'
assert rabin_karp(UpperCAmelCase , UpperCAmelCase )
snake_case_ = 'Lue'
assert not rabin_karp(UpperCAmelCase , UpperCAmelCase )
print('Success.' )
if __name__ == "__main__":
test_rabin_karp()
| 312
| 0
|
"""simple docstring"""
from pathlib import Path
import fire
from tqdm import tqdm
def UpperCAmelCase ( UpperCAmelCase="ro" , UpperCAmelCase="en" , UpperCAmelCase="wmt16" , UpperCAmelCase=None ) -> None:
try:
import datasets
except (ModuleNotFoundError, ImportError):
raise ImportError('run pip install datasets' )
snake_case_ = f'{src_lang}-{tgt_lang}'
print(f'Converting {dataset}-{pair}' )
snake_case_ = datasets.load_dataset(UpperCAmelCase , UpperCAmelCase )
if save_dir is None:
snake_case_ = f'{dataset}-{pair}'
snake_case_ = Path(UpperCAmelCase )
save_dir.mkdir(exist_ok=UpperCAmelCase )
for split in ds.keys():
print(f'Splitting {split} with {ds[split].num_rows} records' )
# to save to val.source, val.target like summary datasets
snake_case_ = 'val' if split == 'validation' else split
snake_case_ = save_dir.joinpath(f'{fn}.source' )
snake_case_ = save_dir.joinpath(f'{fn}.target' )
snake_case_ = src_path.open('w+' )
snake_case_ = tgt_path.open('w+' )
# reader is the bottleneck so writing one record at a time doesn't slow things down
for x in tqdm(ds[split] ):
snake_case_ = x['translation']
src_fp.write(ex[src_lang] + '\n' )
tgt_fp.write(ex[tgt_lang] + '\n' )
print(f'Saved {dataset} dataset to {save_dir}' )
if __name__ == "__main__":
fire.Fire(download_wmt_dataset)
| 367
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__UpperCamelCase = logging.get_logger(__name__)
__UpperCamelCase = {
'''microsoft/resnet-50''': '''https://huggingface.co/microsoft/resnet-50/blob/main/config.json''',
}
class UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE_ = "resnet"
SCREAMING_SNAKE_CASE_ = ["basic", "bottleneck"]
def __init__( self, lowerCAmelCase__=3, lowerCAmelCase__=64, lowerCAmelCase__=[256, 512, 1024, 2048], lowerCAmelCase__=[3, 4, 6, 3], lowerCAmelCase__="bottleneck", lowerCAmelCase__="relu", lowerCAmelCase__=False, lowerCAmelCase__=None, lowerCAmelCase__=None, **lowerCAmelCase__, ) -> Dict:
super().__init__(**lowerCAmelCase__)
if layer_type not in self.layer_types:
raise ValueError(f'layer_type={layer_type} is not one of {",".join(self.layer_types)}')
snake_case_ = num_channels
snake_case_ = embedding_size
snake_case_ = hidden_sizes
snake_case_ = depths
snake_case_ = layer_type
snake_case_ = hidden_act
snake_case_ = downsample_in_first_stage
snake_case_ = ['stem'] + [f'stage{idx}' for idx in range(1, len(lowerCAmelCase__) + 1)]
snake_case_ , snake_case_ = get_aligned_output_features_output_indices(
out_features=lowerCAmelCase__, out_indices=lowerCAmelCase__, stage_names=self.stage_names)
class UpperCamelCase ( lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE_ = version.parse("1.11" )
@property
def a_ ( self) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
])
@property
def a_ ( self) -> float:
return 1e-3
| 312
| 0
|
"""simple docstring"""
import os
from bleurt import score # From: git+https://github.com/google-research/bleurt.git
import datasets
__UpperCamelCase = datasets.logging.get_logger(__name__)
__UpperCamelCase = '''\
@inproceedings{bleurt,
title={BLEURT: Learning Robust Metrics for Text Generation},
author={Thibault Sellam and Dipanjan Das and Ankur P. Parikh},
booktitle={ACL},
year={2020},
url={https://arxiv.org/abs/2004.04696}
}
'''
__UpperCamelCase = '''\
BLEURT is a learnt evaluation metric for Natural Language Generation. It is built using multiple phases of transfer learning, starting from a pretrained BERT model (Devlin et al. 2018)
and then employing another pre-training phase using synthetic data. Finally, it is trained on WMT human annotations. You may run BLEURT out-of-the-box or fine-tune
it for your specific application (the latter is expected to perform better).
See the project\'s README at https://github.com/google-research/bleurt#readme for more information.
'''
__UpperCamelCase = '''
BLEURT score.
Args:
`predictions` (list of str): prediction/candidate sentences
`references` (list of str): reference sentences
`checkpoint` BLEURT checkpoint. Will default to BLEURT-tiny if None.
Returns:
\'scores\': List of scores.
Examples:
>>> predictions = ["hello there", "general kenobi"]
>>> references = ["hello there", "general kenobi"]
>>> bleurt = datasets.load_metric("bleurt")
>>> results = bleurt.compute(predictions=predictions, references=references)
>>> print([round(v, 2) for v in results["scores"]])
[1.03, 1.04]
'''
__UpperCamelCase = {
'''bleurt-tiny-128''': '''https://storage.googleapis.com/bleurt-oss/bleurt-tiny-128.zip''',
'''bleurt-tiny-512''': '''https://storage.googleapis.com/bleurt-oss/bleurt-tiny-512.zip''',
'''bleurt-base-128''': '''https://storage.googleapis.com/bleurt-oss/bleurt-base-128.zip''',
'''bleurt-base-512''': '''https://storage.googleapis.com/bleurt-oss/bleurt-base-512.zip''',
'''bleurt-large-128''': '''https://storage.googleapis.com/bleurt-oss/bleurt-large-128.zip''',
'''bleurt-large-512''': '''https://storage.googleapis.com/bleurt-oss/bleurt-large-512.zip''',
'''BLEURT-20-D3''': '''https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D3.zip''',
'''BLEURT-20-D6''': '''https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D6.zip''',
'''BLEURT-20-D12''': '''https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D12.zip''',
'''BLEURT-20''': '''https://storage.googleapis.com/bleurt-oss-21/BLEURT-20.zip''',
}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCamelCase ( datasets.Metric ):
def a_ ( self) -> Any:
return datasets.MetricInfo(
description=_DESCRIPTION, citation=_CITATION, homepage='https://github.com/google-research/bleurt', inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
{
'predictions': datasets.Value('string', id='sequence'),
'references': datasets.Value('string', id='sequence'),
}), codebase_urls=['https://github.com/google-research/bleurt'], reference_urls=['https://github.com/google-research/bleurt', 'https://arxiv.org/abs/2004.04696'], )
def a_ ( self, lowerCAmelCase__) -> Any:
# check that config name specifies a valid BLEURT model
if self.config_name == "default":
logger.warning(
'Using default BLEURT-Base checkpoint for sequence maximum length 128. '
'You can use a bigger model for better results with e.g.: datasets.load_metric(\'bleurt\', \'bleurt-large-512\').')
snake_case_ = 'bleurt-base-128'
if self.config_name.lower() in CHECKPOINT_URLS:
snake_case_ = self.config_name.lower()
elif self.config_name.upper() in CHECKPOINT_URLS:
snake_case_ = self.config_name.upper()
else:
raise KeyError(
f'{self.config_name} model not found. You should supply the name of a model checkpoint for bleurt in {CHECKPOINT_URLS.keys()}')
# download the model checkpoint specified by self.config_name and set up the scorer
snake_case_ = dl_manager.download_and_extract(CHECKPOINT_URLS[checkpoint_name])
snake_case_ = score.BleurtScorer(os.path.join(lowerCAmelCase__, lowerCAmelCase__))
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__) -> List[Any]:
snake_case_ = self.scorer.score(references=lowerCAmelCase__, candidates=lowerCAmelCase__)
return {"scores": scores}
| 368
|
"""simple docstring"""
import unittest
from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
__UpperCamelCase = get_tests_dir('''fixtures/spiece.model''')
@require_sentencepiece
@require_tokenizers
class UpperCamelCase ( lowerCAmelCase__ , unittest.TestCase ):
SCREAMING_SNAKE_CASE_ = DebertaVaTokenizer
SCREAMING_SNAKE_CASE_ = DebertaVaTokenizerFast
SCREAMING_SNAKE_CASE_ = True
SCREAMING_SNAKE_CASE_ = True
def a_ ( self) -> int:
super().setUp()
# We have a SentencePiece fixture for testing
snake_case_ = DebertaVaTokenizer(lowerCAmelCase__, unk_token='<unk>')
tokenizer.save_pretrained(self.tmpdirname)
def a_ ( self, lowerCAmelCase__) -> Any:
snake_case_ = 'this is a test'
snake_case_ = 'this is a test'
return input_text, output_text
def a_ ( self) -> Optional[int]:
snake_case_ = '<pad>'
snake_case_ = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCAmelCase__), lowerCAmelCase__)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCAmelCase__), lowerCAmelCase__)
def a_ ( self) -> Tuple:
snake_case_ = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0], '<pad>')
self.assertEqual(vocab_keys[1], '<unk>')
self.assertEqual(vocab_keys[-1], '[PAD]')
self.assertEqual(len(lowerCAmelCase__), 3_0001)
def a_ ( self) -> Dict:
self.assertEqual(self.get_tokenizer().vocab_size, 3_0000)
def a_ ( self) -> List[str]:
# fmt: off
snake_case_ = ' \tHeLLo!how \n Are yoU? '
snake_case_ = ['▁hello', '!', 'how', '▁are', '▁you', '?']
# fmt: on
snake_case_ = DebertaVaTokenizer(lowerCAmelCase__, do_lower_case=lowerCAmelCase__)
snake_case_ = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__))
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = DebertaVaTokenizerFast(lowerCAmelCase__, do_lower_case=lowerCAmelCase__)
snake_case_ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__))
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
@unittest.skip('There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.')
def a_ ( self) -> str:
pass
@unittest.skip('There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.')
def a_ ( self) -> List[Any]:
pass
def a_ ( self) -> str:
# fmt: off
snake_case_ = 'I was born in 92000, and this is falsé.'
snake_case_ = ['▁', '<unk>', '▁was', '▁born', '▁in', '▁9', '2000', '▁', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '▁', '.', ]
# fmt: on
snake_case_ = DebertaVaTokenizer(lowerCAmelCase__, split_by_punct=lowerCAmelCase__)
snake_case_ = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__))
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = DebertaVaTokenizerFast(lowerCAmelCase__, split_by_punct=lowerCAmelCase__)
snake_case_ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__))
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
def a_ ( self) -> List[Any]:
# fmt: off
snake_case_ = 'I was born in 92000, and this is falsé.'
snake_case_ = ['▁i', '▁was', '▁born', '▁in', '▁9', '2000', '▁', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '▁', '.', ]
# fmt: on
snake_case_ = DebertaVaTokenizer(lowerCAmelCase__, do_lower_case=lowerCAmelCase__, split_by_punct=lowerCAmelCase__)
snake_case_ = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__))
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = DebertaVaTokenizerFast(lowerCAmelCase__, do_lower_case=lowerCAmelCase__, split_by_punct=lowerCAmelCase__)
snake_case_ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__))
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
def a_ ( self) -> Dict:
# fmt: off
snake_case_ = 'I was born in 92000, and this is falsé.'
snake_case_ = ['▁i', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '.', ]
# fmt: on
snake_case_ = DebertaVaTokenizer(lowerCAmelCase__, do_lower_case=lowerCAmelCase__, split_by_punct=lowerCAmelCase__)
snake_case_ = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__))
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = DebertaVaTokenizerFast(lowerCAmelCase__, do_lower_case=lowerCAmelCase__, split_by_punct=lowerCAmelCase__)
snake_case_ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__))
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
def a_ ( self) -> Tuple:
# fmt: off
snake_case_ = 'I was born in 92000, and this is falsé.'
snake_case_ = ['▁', '<unk>', '▁was', '▁born', '▁in', '▁9', '2000', '▁', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '▁', '.', ]
# fmt: on
snake_case_ = DebertaVaTokenizer(lowerCAmelCase__, do_lower_case=lowerCAmelCase__, split_by_punct=lowerCAmelCase__)
snake_case_ = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__))
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = DebertaVaTokenizerFast(lowerCAmelCase__, do_lower_case=lowerCAmelCase__, split_by_punct=lowerCAmelCase__)
snake_case_ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__))
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
def a_ ( self) -> Any:
# fmt: off
snake_case_ = ' \tHeLLo!how \n Are yoU? '
snake_case_ = ['▁', '<unk>', 'e', '<unk>', 'o', '!', 'how', '▁', '<unk>', 're', '▁yo', '<unk>', '?']
# fmt: on
snake_case_ = DebertaVaTokenizer(lowerCAmelCase__, do_lower_case=lowerCAmelCase__, split_by_punct=lowerCAmelCase__)
snake_case_ = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__))
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = DebertaVaTokenizerFast(lowerCAmelCase__, do_lower_case=lowerCAmelCase__, split_by_punct=lowerCAmelCase__)
snake_case_ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__))
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
def a_ ( self) -> Dict:
snake_case_ = self.get_tokenizer()
snake_case_ = self.get_rust_tokenizer()
snake_case_ = 'I was born in 92000, and this is falsé.'
snake_case_ = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__))
snake_case_ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__))
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__)
snake_case_ = rust_tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = self.get_rust_tokenizer()
snake_case_ = tokenizer.encode(lowerCAmelCase__)
snake_case_ = rust_tokenizer.encode(lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
def a_ ( self) -> int:
snake_case_ = 'This is a test'
snake_case_ = [13, 1, 4398, 25, 21, 1289]
snake_case_ = ['▁', 'T', 'his', '▁is', '▁a', '▁test']
snake_case_ = ['▁', '<unk>', 'his', '▁is', '▁a', '▁test']
snake_case_ = DebertaVaTokenizer(lowerCAmelCase__, keep_accents=lowerCAmelCase__)
snake_case_ = DebertaVaTokenizerFast(lowerCAmelCase__, keep_accents=lowerCAmelCase__)
snake_case_ = tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = tokenizer.tokenize(lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = tokenizer.convert_ids_to_tokens(lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = rust_tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = rust_tokenizer.tokenize(lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = rust_tokenizer.convert_ids_to_tokens(lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
# fmt: off
snake_case_ = 'I was born in 92000, and this is falsé.'
snake_case_ = [13, 1, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9]
snake_case_ = ['▁', 'I', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', 'é', '.', ]
snake_case_ = ['▁', '<unk>', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '.', ]
# fmt: on
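# Note the asymmetry exercised below: `tokenize` keeps the raw pieces ("I", "é"), while
# `encode` maps pieces absent from the fixture vocab to the unk id, so converting the
# ids back yields "<unk>" in their place.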
snake_case_ = tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = tokenizer.tokenize(lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = tokenizer.convert_ids_to_tokens(lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = rust_tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = rust_tokenizer.tokenize(lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = rust_tokenizer.convert_ids_to_tokens(lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
def a_ ( self) -> Tuple:
snake_case_ = DebertaVaTokenizer(lowerCAmelCase__)
snake_case_ = tokenizer.encode('sequence builders')
snake_case_ = tokenizer.encode('multi-sequence build')
snake_case_ = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase__)
snake_case_ = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase__, lowerCAmelCase__)
self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id], lowerCAmelCase__)
self.assertEqual(
[tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id], lowerCAmelCase__, )
@slow
def a_ ( self) -> Union[str, Any]:
# fmt: off
snake_case_ = {'input_ids': [[1, 3_9867, 36, 1_9390, 486, 27, 3_5052, 8_1436, 18, 6_0685, 1225, 7, 3_5052, 8_1436, 18, 9367, 1_6899, 18, 1_5937, 53, 594, 773, 18, 1_6287, 3_0465, 36, 1_5937, 6, 4_1139, 38, 3_6979, 6_0763, 191, 6, 3_4132, 99, 6, 5_0538, 390, 4_3230, 6, 3_4132, 2779, 2_0850, 14, 699, 1072, 1194, 36, 382, 1_0901, 53, 7, 699, 1072, 2084, 36, 2_0422, 630, 53, 19, 105, 3049, 1896, 1053, 1_6899, 1506, 11, 3_7978, 4243, 7, 1237, 3_1869, 200, 1_6566, 654, 6, 3_5052, 8_1436, 7, 5_5630, 1_3593, 4, 2], [1, 26, 1_5011, 13, 667, 8, 1053, 18, 2_3611, 1237, 7_2356, 1_2820, 34, 10_4134, 1209, 35, 1_3313, 6627, 21, 202, 347, 7, 164, 2399, 11, 46, 4485, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 1232, 2864, 1_5785, 1_4951, 105, 5, 8581, 1250, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowerCAmelCase__, model_name='microsoft/deberta-v2-xlarge', revision='ad6e42c1532ddf3a15c39246b63f5559d558b670', )
| 312
| 0
|
"""simple docstring"""
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase=None ) -> Tuple:
snake_case_ = None
if token is not None:
snake_case_ = {'Accept': 'application/vnd.github+json', 'Authorization': f'Bearer {token}'}
snake_case_ = f'https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100'
snake_case_ = requests.get(UpperCAmelCase , headers=UpperCAmelCase ).json()
snake_case_ = {}
try:
job_links.update({job['name']: job['html_url'] for job in result['jobs']} )
snake_case_ = math.ceil((result['total_count'] - 100) / 100 )
for i in range(UpperCAmelCase ):
snake_case_ = requests.get(url + f'&page={i + 2}' , headers=UpperCAmelCase ).json()
job_links.update({job['name']: job['html_url'] for job in result['jobs']} )
return job_links
except Exception:
print(f'Unknown error, could not fetch links:\n{traceback.format_exc()}' )
return {}
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase=None ) -> Optional[Any]:
snake_case_ = None
if token is not None:
snake_case_ = {'Accept': 'application/vnd.github+json', 'Authorization': f'Bearer {token}'}
snake_case_ = f'https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/artifacts?per_page=100'
snake_case_ = requests.get(UpperCAmelCase , headers=UpperCAmelCase ).json()
snake_case_ = {}
try:
artifacts.update({artifact['name']: artifact['archive_download_url'] for artifact in result['artifacts']} )
snake_case_ = math.ceil((result['total_count'] - 100) / 100 )
for i in range(UpperCAmelCase ):
snake_case_ = requests.get(url + f'&page={i + 2}' , headers=UpperCAmelCase ).json()
artifacts.update({artifact['name']: artifact['archive_download_url'] for artifact in result['artifacts']} )
return artifacts
except Exception:
print(f'Unknown error, could not fetch links:\n{traceback.format_exc()}' )
return {}
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> List[Any]:
snake_case_ = None
if token is not None:
snake_case_ = {'Accept': 'application/vnd.github+json', 'Authorization': f'Bearer {token}'}
snake_case_ = requests.get(UpperCAmelCase , headers=UpperCAmelCase , allow_redirects=UpperCAmelCase )
snake_case_ = result.headers['Location']
snake_case_ = requests.get(UpperCAmelCase , allow_redirects=UpperCAmelCase )
snake_case_ = os.path.join(UpperCAmelCase , f'{artifact_name}.zip' )
with open(UpperCAmelCase , 'wb' ) as fp:
fp.write(response.content )
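# GitHub's artifact endpoint answers with a redirect to a short-lived signed URL, so the
# request above disables automatic redirects, reads the `Location` header, and downloads
# the zip from that URL into the output directory.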
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase=None ) -> Dict:
snake_case_ = []
snake_case_ = []
snake_case_ = None
with zipfile.ZipFile(UpperCAmelCase ) as z:
for filename in z.namelist():
if not os.path.isdir(UpperCAmelCase ):
# read the file
if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
with z.open(UpperCAmelCase ) as f:
for line in f:
snake_case_ = line.decode('UTF-8' ).strip()
if filename == "failures_line.txt":
try:
# `error_line` is the place where `error` occurs
snake_case_ = line[: line.index(': ' )]
snake_case_ = line[line.index(': ' ) + len(': ' ) :]
errors.append([error_line, error] )
except Exception:
# skip un-related lines
pass
elif filename == "summary_short.txt" and line.startswith('FAILED ' ):
# `test` is the test method that failed
snake_case_ = line[len('FAILED ' ) :]
failed_tests.append(UpperCAmelCase )
elif filename == "job_name.txt":
snake_case_ = line
if len(UpperCAmelCase ) != len(UpperCAmelCase ):
raise ValueError(
f'`errors` and `failed_tests` should have the same number of elements. Got {len(UpperCAmelCase )} for `errors` '
f'and {len(UpperCAmelCase )} for `failed_tests` instead. The test reports in {artifact_zip_path} have some'
' problem.' )
snake_case_ = None
if job_name and job_links:
snake_case_ = job_links.get(UpperCAmelCase , UpperCAmelCase )
# A list with elements of the form (line of error, error, failed test, job link)
snake_case_ = [x + [y] + [job_link] for x, y in zip(UpperCAmelCase , UpperCAmelCase )]
return result
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase=None ) -> Any:
snake_case_ = []
snake_case_ = [os.path.join(UpperCAmelCase , UpperCAmelCase ) for p in os.listdir(UpperCAmelCase ) if p.endswith('.zip' )]
for p in paths:
errors.extend(get_errors_from_single_artifact(UpperCAmelCase , job_links=UpperCAmelCase ) )
return errors
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase=None ) -> Dict:
snake_case_ = Counter()
counter.update([x[1] for x in logs] )
snake_case_ = counter.most_common()
snake_case_ = {}
for error, count in counts:
if error_filter is None or error not in error_filter:
snake_case_ = {'count': count, 'failed_tests': [(x[2], x[0]) for x in logs if x[1] == error]}
snake_case_ = dict(sorted(r.items() , key=lambda item : item[1]["count"] , reverse=UpperCAmelCase ) )
return r
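# Illustrative shape of the returned mapping (values invented for the example):
#   {"SomeError: ...": {"count": 12, "failed_tests": [(job_link, error_line), ...]}, ...}
# ordered so that the most frequent errors come first.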
def UpperCAmelCase ( UpperCAmelCase ) -> str:
snake_case_ = test.split('::' )[0]
if test.startswith('tests/models/' ):
snake_case_ = test.split('/' )[2]
else:
snake_case_ = None
return test
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase=None ) -> int:
snake_case_ = [(x[0], x[1], get_model(x[2] )) for x in logs]
snake_case_ = [x for x in logs if x[2] is not None]
snake_case_ = {x[2] for x in logs}
snake_case_ = {}
for test in tests:
snake_case_ = Counter()
# count by errors in `test`
counter.update([x[1] for x in logs if x[2] == test] )
snake_case_ = counter.most_common()
snake_case_ = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
snake_case_ = sum(error_counts.values() )
if n_errors > 0:
snake_case_ = {'count': n_errors, 'errors': error_counts}
snake_case_ = dict(sorted(r.items() , key=lambda item : item[1]["count"] , reverse=UpperCAmelCase ) )
return r
def UpperCAmelCase ( UpperCAmelCase ) -> Any:
snake_case_ = '| no. | error | status |'
snake_case_ = '|-:|:-|:-|'
snake_case_ = [header, sep]
for error in reduced_by_error:
snake_case_ = reduced_by_error[error]['count']
snake_case_ = f'| {count} | {error[:100]} | |'
lines.append(UpperCAmelCase )
return "\n".join(UpperCAmelCase )
def UpperCAmelCase ( UpperCAmelCase ) -> str:
snake_case_ = '| model | no. of errors | major error | count |'
snake_case_ = '|-:|-:|-:|-:|'
snake_case_ = [header, sep]
for model in reduced_by_model:
snake_case_ = reduced_by_model[model]['count']
snake_case_ , snake_case_ = list(reduced_by_model[model]['errors'].items() )[0]
snake_case_ = f'| {model} | {count} | {error[:60]} | {_count} |'
lines.append(UpperCAmelCase )
return "\n".join(UpperCAmelCase )
if __name__ == "__main__":
__UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--workflow_run_id''', type=str, required=True, help='''A GitHub Actions workflow run id.''')
parser.add_argument(
'''--output_dir''',
type=str,
required=True,
help='''Where to store the downloaded artifacts and other result files.''',
)
parser.add_argument('''--token''', default=None, type=str, help='''A token that has actions:read permission.''')
__UpperCamelCase = parser.parse_args()
os.makedirs(args.output_dir, exist_ok=True)
__UpperCamelCase = get_job_links(args.workflow_run_id, token=args.token)
__UpperCamelCase = {}
# To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
# For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
if _job_links:
for k, v in _job_links.items():
# This is how GitHub actions combine job names.
if " / " in k:
__UpperCamelCase = k.find(''' / ''')
__UpperCamelCase = k[index + len(''' / ''') :]
__UpperCamelCase = v
with open(os.path.join(args.output_dir, '''job_links.json'''), '''w''', encoding='''UTF-8''') as fp:
json.dump(job_links, fp, ensure_ascii=False, indent=4)
__UpperCamelCase = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, '''artifacts.json'''), '''w''', encoding='''UTF-8''') as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
for idx, (name, url) in enumerate(artifacts.items()):
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
__UpperCamelCase = get_all_errors(args.output_dir, job_links=job_links)
# `e[1]` is the error
__UpperCamelCase = Counter()
counter.update([e[1] for e in errors])
# print the top 30 most common test errors
__UpperCamelCase = counter.most_common(30)
for item in most_common:
print(item)
with open(os.path.join(args.output_dir, '''errors.json'''), '''w''', encoding='''UTF-8''') as fp:
json.dump(errors, fp, ensure_ascii=False, indent=4)
__UpperCamelCase = reduce_by_error(errors)
__UpperCamelCase = reduce_by_model(errors)
__UpperCamelCase = make_github_table(reduced_by_error)
__UpperCamelCase = make_github_table_per_model(reduced_by_model)
with open(os.path.join(args.output_dir, '''reduced_by_error.txt'''), '''w''', encoding='''UTF-8''') as fp:
fp.write(sa)
with open(os.path.join(args.output_dir, '''reduced_by_model.txt'''), '''w''', encoding='''UTF-8''') as fp:
fp.write(sa)
| 369
|
"""simple docstring"""
# flake8: noqa
# Lint as: python3
from typing import Dict, List, Optional, Type
from .. import config
from ..utils import logging
from .formatting import (
ArrowFormatter,
CustomFormatter,
Formatter,
PandasFormatter,
PythonFormatter,
TensorFormatter,
format_table,
query_table,
)
from .np_formatter import NumpyFormatter
__UpperCamelCase = logging.get_logger(__name__)
__UpperCamelCase = {}
__UpperCamelCase = {}
__UpperCamelCase = {}
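# Three registries, populated by the helpers below:
#   format type -> Formatter class, alias -> canonical format type, and
#   format type/alias -> the error to raise when an optional backend is not installed.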
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = None , ) -> Optional[Any]:
snake_case_ = aliases if aliases is not None else []
if format_type in _FORMAT_TYPES:
logger.warning(
f'Overwriting format type \'{format_type}\' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})' )
snake_case_ = formatter_cls
for alias in set(aliases + [format_type] ):
if alias in _FORMAT_TYPES_ALIASES:
logger.warning(
f'Overwriting format type alias \'{alias}\' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})' )
snake_case_ = format_type
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = None ) -> Union[str, Any]:
snake_case_ = aliases if aliases is not None else []
for alias in set(aliases + [format_type] ):
snake_case_ = unavailable_error
# Here we define all the available formatting functions that can be used by `Dataset.set_format`
_register_formatter(PythonFormatter, None, aliases=['''python'''])
_register_formatter(ArrowFormatter, '''arrow''', aliases=['''pa''', '''pyarrow'''])
_register_formatter(NumpyFormatter, '''numpy''', aliases=['''np'''])
_register_formatter(PandasFormatter, '''pandas''', aliases=['''pd'''])
_register_formatter(CustomFormatter, '''custom''')
if config.TORCH_AVAILABLE:
from .torch_formatter import TorchFormatter
_register_formatter(TorchFormatter, '''torch''', aliases=['''pt''', '''pytorch'''])
else:
__UpperCamelCase = ValueError('''PyTorch needs to be installed to be able to return PyTorch tensors.''')
_register_unavailable_formatter(_torch_error, '''torch''', aliases=['''pt''', '''pytorch'''])
if config.TF_AVAILABLE:
from .tf_formatter import TFFormatter
_register_formatter(TFFormatter, '''tensorflow''', aliases=['''tf'''])
else:
__UpperCamelCase = ValueError('''Tensorflow needs to be installed to be able to return Tensorflow tensors.''')
_register_unavailable_formatter(_tf_error, '''tensorflow''', aliases=['''tf'''])
if config.JAX_AVAILABLE:
from .jax_formatter import JaxFormatter
_register_formatter(JaxFormatter, '''jax''', aliases=[])
else:
__UpperCamelCase = ValueError('''JAX needs to be installed to be able to return JAX arrays.''')
_register_unavailable_formatter(_jax_error, '''jax''', aliases=[])
def UpperCAmelCase ( UpperCAmelCase ) -> Optional[str]:
if format_type in _FORMAT_TYPES_ALIASES:
return _FORMAT_TYPES_ALIASES[format_type]
else:
return format_type
def UpperCAmelCase ( UpperCAmelCase , **UpperCAmelCase ) -> Formatter:
snake_case_ = get_format_type_from_alias(UpperCAmelCase )
if format_type in _FORMAT_TYPES:
return _FORMAT_TYPES[format_type](**UpperCAmelCase )
if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE:
raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type]
else:
raise ValueError(
f'Return type should be None or selected in {list(type for type in _FORMAT_TYPES.keys() if type is not None )}, but got \'{format_type}\'' )
| 312
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__UpperCamelCase = {
'''configuration_mgp_str''': ['''MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MgpstrConfig'''],
'''processing_mgp_str''': ['''MgpstrProcessor'''],
'''tokenization_mgp_str''': ['''MgpstrTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase = [
'''MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MgpstrModel''',
'''MgpstrPreTrainedModel''',
'''MgpstrForSceneTextRecognition''',
]
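# Standard lazy-import layout: under TYPE_CHECKING the symbols are imported eagerly so
# static analyzers can see them; at runtime the module is swapped for a _LazyModule that
# resolves attributes from the import structure above on first access.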
if TYPE_CHECKING:
from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
from .processing_mgp_str import MgpstrProcessor
from .tokenization_mgp_str import MgpstrTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mgp_str import (
MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
MgpstrForSceneTextRecognition,
MgpstrModel,
MgpstrPreTrainedModel,
)
else:
import sys
__UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 370
|
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mvp import MvpTokenizer
__UpperCamelCase = logging.get_logger(__name__)
__UpperCamelCase = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
# See all MVP models at https://huggingface.co/models?filter=mvp
__UpperCamelCase = {
'''vocab_file''': {
'''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/vocab.json''',
},
'''added_tokens.json''': {
'''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/added_tokens.json''',
},
'''merges_file''': {
'''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/tokenizer.json''',
},
}
__UpperCamelCase = {
'''RUCAIBox/mvp''': 1024,
}
class UpperCamelCase ( lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE_ = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE_ = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE_ = ["input_ids", "attention_mask"]
SCREAMING_SNAKE_CASE_ = MvpTokenizer
def __init__( self, lowerCAmelCase__=None, lowerCAmelCase__=None, lowerCAmelCase__=None, lowerCAmelCase__="replace", lowerCAmelCase__="<s>", lowerCAmelCase__="</s>", lowerCAmelCase__="</s>", lowerCAmelCase__="<s>", lowerCAmelCase__="<unk>", lowerCAmelCase__="<pad>", lowerCAmelCase__="<mask>", lowerCAmelCase__=False, lowerCAmelCase__=True, **lowerCAmelCase__, ) -> Union[str, Any]:
super().__init__(
lowerCAmelCase__, lowerCAmelCase__, tokenizer_file=lowerCAmelCase__, errors=lowerCAmelCase__, bos_token=lowerCAmelCase__, eos_token=lowerCAmelCase__, sep_token=lowerCAmelCase__, cls_token=lowerCAmelCase__, unk_token=lowerCAmelCase__, pad_token=lowerCAmelCase__, mask_token=lowerCAmelCase__, add_prefix_space=lowerCAmelCase__, trim_offsets=lowerCAmelCase__, **lowerCAmelCase__, )
snake_case_ = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
if pre_tok_state.get('add_prefix_space', lowerCAmelCase__) != add_prefix_space:
snake_case_ = getattr(lowerCAmelCase__, pre_tok_state.pop('type'))
snake_case_ = add_prefix_space
snake_case_ = pre_tok_class(**lowerCAmelCase__)
snake_case_ = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
snake_case_ = 'post_processor'
snake_case_ = getattr(self.backend_tokenizer, lowerCAmelCase__, lowerCAmelCase__)
if tokenizer_component_instance:
snake_case_ = json.loads(tokenizer_component_instance.__getstate__())
# The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
if "sep" in state:
snake_case_ = tuple(state['sep'])
if "cls" in state:
snake_case_ = tuple(state['cls'])
snake_case_ = False
if state.get('add_prefix_space', lowerCAmelCase__) != add_prefix_space:
snake_case_ = add_prefix_space
snake_case_ = True
if state.get('trim_offsets', lowerCAmelCase__) != trim_offsets:
snake_case_ = trim_offsets
snake_case_ = True
if changes_to_apply:
snake_case_ = getattr(lowerCAmelCase__, state.pop('type'))
snake_case_ = component_class(**lowerCAmelCase__)
setattr(self.backend_tokenizer, lowerCAmelCase__, lowerCAmelCase__)
@property
def a_ ( self) -> str:
if self._mask_token is None:
if self.verbose:
logger.error('Using mask_token, but it is not set yet.')
return None
return str(self._mask_token)
@mask_token.setter
def a_ ( self, lowerCAmelCase__) -> Any:
snake_case_ = AddedToken(lowerCAmelCase__, lstrip=lowerCAmelCase__, rstrip=lowerCAmelCase__) if isinstance(lowerCAmelCase__, lowerCAmelCase__) else value
snake_case_ = value
def a_ ( self, *lowerCAmelCase__, **lowerCAmelCase__) -> BatchEncoding:
snake_case_ = kwargs.get('is_split_into_words', lowerCAmelCase__)
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
'to use it with pretokenized inputs.')
return super()._batch_encode_plus(*lowerCAmelCase__, **lowerCAmelCase__)
def a_ ( self, *lowerCAmelCase__, **lowerCAmelCase__) -> BatchEncoding:
snake_case_ = kwargs.get('is_split_into_words', lowerCAmelCase__)
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
'to use it with pretokenized inputs.')
return super()._encode_plus(*lowerCAmelCase__, **lowerCAmelCase__)
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None) -> Tuple[str]:
snake_case_ = self._tokenizer.model.save(lowerCAmelCase__, name=lowerCAmelCase__)
return tuple(lowerCAmelCase__)
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__=None) -> str:
snake_case_ = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
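# BART-style special-token layout: a single sequence becomes `<s> A </s>` and a pair
# becomes `<s> A </s></s> B </s>`, as built by the two return paths above.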
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None) -> List[int]:
snake_case_ = [self.sep_token_id]
snake_case_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
| 312
| 0
|
"""simple docstring"""
import argparse
import torch
from transformers import (
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaForAudioFrameClassification,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
logging,
)
logging.set_verbosity_info()
__UpperCamelCase = logging.get_logger(__name__)
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Tuple:
snake_case_ = WavaVecaForSequenceClassification.from_pretrained(UpperCAmelCase , config=UpperCAmelCase )
snake_case_ = downstream_dict['projector.weight']
snake_case_ = downstream_dict['projector.bias']
snake_case_ = downstream_dict['model.post_net.linear.weight']
snake_case_ = downstream_dict['model.post_net.linear.bias']
return model
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Tuple:
snake_case_ = WavaVecaForAudioFrameClassification.from_pretrained(UpperCAmelCase , config=UpperCAmelCase )
snake_case_ = downstream_dict['model.linear.weight']
snake_case_ = downstream_dict['model.linear.bias']
return model
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Union[str, Any]:
snake_case_ = WavaVecaForXVector.from_pretrained(UpperCAmelCase , config=UpperCAmelCase )
snake_case_ = downstream_dict['connector.weight']
snake_case_ = downstream_dict['connector.bias']
for i, kernel_size in enumerate(hf_config.tdnn_kernel ):
snake_case_ = downstream_dict[
f'model.framelevel_feature_extractor.module.{i}.kernel.weight'
]
snake_case_ = downstream_dict[f'model.framelevel_feature_extractor.module.{i}.kernel.bias']
snake_case_ = downstream_dict['model.utterancelevel_feature_extractor.linear1.weight']
snake_case_ = downstream_dict['model.utterancelevel_feature_extractor.linear1.bias']
snake_case_ = downstream_dict['model.utterancelevel_feature_extractor.linear2.weight']
snake_case_ = downstream_dict['model.utterancelevel_feature_extractor.linear2.bias']
snake_case_ = downstream_dict['objective.W']
return model
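# Each converter above instantiates the matching Hugging Face head model from the
# pretrained base, then copies only the downstream head weights (projector/classifier,
# frame-level linear, or x-vector layers) out of the s3prl "Downstream" state dict.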
@torch.no_grad()
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Optional[int]:
snake_case_ = torch.load(UpperCAmelCase , map_location='cpu' )
snake_case_ = checkpoint['Downstream']
snake_case_ = WavaVecaConfig.from_pretrained(UpperCAmelCase )
snake_case_ = WavaVecaFeatureExtractor.from_pretrained(
UpperCAmelCase , return_attention_mask=UpperCAmelCase , do_normalize=UpperCAmelCase )
snake_case_ = hf_config.architectures[0]
if arch.endswith('ForSequenceClassification' ):
snake_case_ = convert_classification(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
elif arch.endswith('ForAudioFrameClassification' ):
snake_case_ = convert_diarization(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
elif arch.endswith('ForXVector' ):
snake_case_ = convert_xvector(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
else:
raise NotImplementedError(f'S3PRL weights conversion is not supported for {arch}' )
if hf_config.use_weighted_layer_sum:
snake_case_ = checkpoint['Featurizer']['weights']
hf_feature_extractor.save_pretrained(UpperCAmelCase )
hf_model.save_pretrained(UpperCAmelCase )
if __name__ == "__main__":
__UpperCamelCase = argparse.ArgumentParser()
parser.add_argument(
'''--base_model_name''', default=None, type=str, help='''Name of the huggingface pretrained base model.'''
)
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to the huggingface classifier config.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to the s3prl checkpoint.''')
parser.add_argument('''--model_dump_path''', default=None, type=str, help='''Path to the final converted model.''')
__UpperCamelCase = parser.parse_args()
convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
| 371
|
"""simple docstring"""
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionImgaImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class UpperCamelCase ( lowerCAmelCase__ , unittest.TestCase ):
SCREAMING_SNAKE_CASE_ = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"
def a_ ( self, lowerCAmelCase__=0) -> List[Any]:
snake_case_ = floats_tensor((1, 3, 128, 128), rng=random.Random(lowerCAmelCase__))
snake_case_ = np.random.RandomState(lowerCAmelCase__)
snake_case_ = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'generator': generator,
'num_inference_steps': 3,
'strength': 0.75,
'guidance_scale': 7.5,
'output_type': 'numpy',
}
return inputs
def a_ ( self) -> Optional[Any]:
snake_case_ = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
pipe.set_progress_bar_config(disable=lowerCAmelCase__)
snake_case_ = self.get_dummy_inputs()
snake_case_ = pipe(**lowerCAmelCase__).images
snake_case_ = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 128, 128, 3)
snake_case_ = np.array([0.69643, 0.58484, 0.50314, 0.58760, 0.55368, 0.59643, 0.51529, 0.41217, 0.49087])
assert np.abs(image_slice - expected_slice).max() < 1e-1
def a_ ( self) -> List[str]:
snake_case_ = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
snake_case_ = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=lowerCAmelCase__)
pipe.set_progress_bar_config(disable=lowerCAmelCase__)
snake_case_ = self.get_dummy_inputs()
snake_case_ = pipe(**lowerCAmelCase__).images
snake_case_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
snake_case_ = np.array([0.61737, 0.54642, 0.53183, 0.54465, 0.52742, 0.60525, 0.49969, 0.40655, 0.48154])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
def a_ ( self) -> str:
snake_case_ = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
snake_case_ = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
pipe.set_progress_bar_config(disable=lowerCAmelCase__)
# warmup pass to apply optimizations
snake_case_ = pipe(**self.get_dummy_inputs())
snake_case_ = self.get_dummy_inputs()
snake_case_ = pipe(**lowerCAmelCase__).images
snake_case_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
snake_case_ = np.array([0.52761, 0.59977, 0.49033, 0.49619, 0.54282, 0.50311, 0.47600, 0.40918, 0.45203])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
def a_ ( self) -> int:
snake_case_ = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
snake_case_ = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
pipe.set_progress_bar_config(disable=lowerCAmelCase__)
snake_case_ = self.get_dummy_inputs()
snake_case_ = pipe(**lowerCAmelCase__).images
snake_case_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
snake_case_ = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
def a_ ( self) -> Dict:
snake_case_ = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
snake_case_ = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
pipe.set_progress_bar_config(disable=lowerCAmelCase__)
snake_case_ = self.get_dummy_inputs()
snake_case_ = pipe(**lowerCAmelCase__).images
snake_case_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
snake_case_ = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
def a_ ( self) -> Dict:
snake_case_ = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
snake_case_ = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe.set_progress_bar_config(disable=lowerCAmelCase__)
snake_case_ = self.get_dummy_inputs()
snake_case_ = pipe(**lowerCAmelCase__).images
snake_case_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
snake_case_ = np.array([0.65331, 0.58277, 0.48204, 0.56059, 0.53665, 0.56235, 0.50969, 0.40009, 0.46552])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class UpperCamelCase ( unittest.TestCase ):
@property
def a_ ( self) -> int:
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
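# These are onnxruntime CUDAExecutionProvider options: `gpu_mem_limit` caps the memory
# arena in bytes and `arena_extend_strategy` controls how the arena grows on demand.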
@property
def a_ ( self) -> str:
snake_case_ = ort.SessionOptions()
snake_case_ = False
return options
def a_ ( self) -> Any:
snake_case_ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/img2img/sketch-mountains-input.jpg')
snake_case_ = init_image.resize((768, 512))
# using the PNDM scheduler by default
snake_case_ = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4', revision='onnx', safety_checker=lowerCAmelCase__, feature_extractor=lowerCAmelCase__, provider=self.gpu_provider, sess_options=self.gpu_options, )
pipe.set_progress_bar_config(disable=lowerCAmelCase__)
snake_case_ = 'A fantasy landscape, trending on artstation'
snake_case_ = np.random.RandomState(0)
snake_case_ = pipe(
prompt=lowerCAmelCase__, image=lowerCAmelCase__, strength=0.75, guidance_scale=7.5, num_inference_steps=10, generator=lowerCAmelCase__, output_type='np', )
snake_case_ = output.images
snake_case_ = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 768, 3)
snake_case_ = np.array([0.4909, 0.5059, 0.5372, 0.4623, 0.4876, 0.5049, 0.4820, 0.4956, 0.5019])
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
def a_ ( self) -> List[Any]:
snake_case_ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/img2img/sketch-mountains-input.jpg')
snake_case_ = init_image.resize((768, 512))
snake_case_ = LMSDiscreteScheduler.from_pretrained(
'runwayml/stable-diffusion-v1-5', subfolder='scheduler', revision='onnx')
snake_case_ = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5', revision='onnx', scheduler=lowerCAmelCase__, safety_checker=lowerCAmelCase__, feature_extractor=lowerCAmelCase__, provider=self.gpu_provider, sess_options=self.gpu_options, )
pipe.set_progress_bar_config(disable=lowerCAmelCase__)
snake_case_ = 'A fantasy landscape, trending on artstation'
snake_case_ = np.random.RandomState(0)
snake_case_ = pipe(
prompt=lowerCAmelCase__, image=lowerCAmelCase__, strength=0.75, guidance_scale=7.5, num_inference_steps=20, generator=lowerCAmelCase__, output_type='np', )
snake_case_ = output.images
snake_case_ = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 768, 3)
snake_case_ = np.array([0.8043, 0.926, 0.9581, 0.8119, 0.8954, 0.913, 0.7209, 0.7463, 0.7431])
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
| 312
| 0
|
from __future__ import annotations
from typing import Dict
from ...configuration_utils import PretrainedConfig
__UpperCamelCase = {
'''susnato/ernie-m-base_pytorch''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json''',
'''susnato/ernie-m-large_pytorch''': '''https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json''',
}
class UpperCamelCase ( lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE_ = "ernie_m"
SCREAMING_SNAKE_CASE_ = {"dropout": "classifier_dropout", "num_classes": "num_labels"}
def __init__( self, lowerCAmelCase__ = 25_0002, lowerCAmelCase__ = 768, lowerCAmelCase__ = 12, lowerCAmelCase__ = 12, lowerCAmelCase__ = 3072, lowerCAmelCase__ = "gelu", lowerCAmelCase__ = 0.1, lowerCAmelCase__ = 0.1, lowerCAmelCase__ = 514, lowerCAmelCase__ = 0.02, lowerCAmelCase__ = 1, lowerCAmelCase__ = 1e-05, lowerCAmelCase__=None, lowerCAmelCase__=False, lowerCAmelCase__=0.0, **lowerCAmelCase__, ) -> Optional[Any]:
super().__init__(pad_token_id=lowerCAmelCase__, **lowerCAmelCase__)
snake_case_ = vocab_size
snake_case_ = hidden_size
snake_case_ = num_hidden_layers
snake_case_ = num_attention_heads
snake_case_ = intermediate_size
snake_case_ = hidden_act
snake_case_ = hidden_dropout_prob
snake_case_ = attention_probs_dropout_prob
snake_case_ = max_position_embeddings
snake_case_ = initializer_range
snake_case_ = layer_norm_eps
snake_case_ = classifier_dropout
snake_case_ = is_decoder
snake_case_ = act_dropout
| 350
|
"""simple docstring"""
import io
import math
from typing import Dict, Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import convert_to_rgb, normalize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
get_image_size,
infer_channel_dimension_format,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_vision_available, logging
from ...utils.import_utils import requires_backends
if is_vision_available():
import textwrap
from PIL import Image, ImageDraw, ImageFont
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
__UpperCamelCase = False
__UpperCamelCase = logging.get_logger(__name__)
__UpperCamelCase = '''ybelkada/fonts'''
def UpperCAmelCase ( ) -> Dict:
if is_torch_available() and not is_torch_greater_or_equal_than_1_11:
raise ImportError(
f'You are using torch=={torch.__version__}, but torch>=1.11.0 is required to use '
'Pix2StructImageProcessor. Please upgrade torch.' )
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> str:
requires_backends(UpperCAmelCase , ['torch'] )
_check_torch_version()
snake_case_ = image_tensor.unsqueeze(0 )
snake_case_ = torch.nn.functional.unfold(UpperCAmelCase , (patch_height, patch_width) , stride=(patch_height, patch_width) )
snake_case_ = patches.reshape(image_tensor.size(0 ) , image_tensor.size(1 ) , UpperCAmelCase , UpperCAmelCase , -1 )
snake_case_ = patches.permute(0 , 4 , 2 , 3 , 1 ).reshape(
image_tensor.size(2 ) // patch_height , image_tensor.size(3 ) // patch_width , image_tensor.size(1 ) * patch_height * patch_width , )
return patches.unsqueeze(0 )
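# Shape flow for a [C, H, W] input: unsqueeze -> [1, C, H, W]; unfold ->
# [1, C * ph * pw, n_patches]; the reshape/permute pair then lays the result out as
# [1, H // ph, W // pw, ph * pw * C], i.e. one flattened patch per grid cell.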
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase = 36 , UpperCAmelCase = "black" , UpperCAmelCase = "white" , UpperCAmelCase = 5 , UpperCAmelCase = 5 , UpperCAmelCase = 5 , UpperCAmelCase = 5 , UpperCAmelCase = None , UpperCAmelCase = None , ) -> Image.Image:
requires_backends(UpperCAmelCase , 'vision' )
# Add new lines so that each line is no more than 80 characters.
snake_case_ = textwrap.TextWrapper(width=80 )
snake_case_ = wrapper.wrap(text=UpperCAmelCase )
snake_case_ = '\n'.join(UpperCAmelCase )
if font_bytes is not None and font_path is None:
snake_case_ = io.BytesIO(UpperCAmelCase )
elif font_path is not None:
snake_case_ = font_path
else:
snake_case_ = hf_hub_download(UpperCAmelCase , 'Arial.TTF' )
snake_case_ = ImageFont.truetype(UpperCAmelCase , encoding='UTF-8' , size=UpperCAmelCase )
# Use a temporary canvas to determine the width and height in pixels when
# rendering the text.
snake_case_ = ImageDraw.Draw(Image.new('RGB' , (1, 1) , UpperCAmelCase ) )
snake_case_ , snake_case_ , snake_case_ , snake_case_ = temp_draw.textbbox((0, 0) , UpperCAmelCase , UpperCAmelCase )
# Create the actual image with a bit of padding around the text.
snake_case_ = text_width + left_padding + right_padding
snake_case_ = text_height + top_padding + bottom_padding
snake_case_ = Image.new('RGB' , (image_width, image_height) , UpperCAmelCase )
snake_case_ = ImageDraw.Draw(UpperCAmelCase )
draw.text(xy=(left_padding, top_padding) , text=UpperCAmelCase , fill=UpperCAmelCase , font=UpperCAmelCase )
return image
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ) -> Any:
requires_backends(UpperCAmelCase , 'vision' )
# Convert to PIL image if necessary
snake_case_ = to_pil_image(UpperCAmelCase )
snake_case_ = render_text(UpperCAmelCase , **UpperCAmelCase )
snake_case_ = max(header_image.width , image.width )
snake_case_ = int(image.height * (new_width / image.width) )
snake_case_ = int(header_image.height * (new_width / header_image.width) )
snake_case_ = Image.new('RGB' , (new_width, new_height + new_header_height) , 'white' )
new_image.paste(header_image.resize((new_width, new_header_height) ) , (0, 0) )
new_image.paste(image.resize((new_width, new_height) ) , (0, new_header_height) )
# Convert back to the original framework if necessary
snake_case_ = to_numpy_array(UpperCAmelCase )
if infer_channel_dimension_format(UpperCAmelCase ) == ChannelDimension.LAST:
snake_case_ = to_channel_dimension_format(UpperCAmelCase , ChannelDimension.LAST )
return new_image
class UpperCamelCase ( lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE_ = ["flattened_patches"]
def __init__( self, lowerCAmelCase__ = True, lowerCAmelCase__ = True, lowerCAmelCase__ = None, lowerCAmelCase__ = 2048, lowerCAmelCase__ = False, **lowerCAmelCase__, ) -> None:
super().__init__(**lowerCAmelCase__)
snake_case_ = patch_size if patch_size is not None else {'height': 16, 'width': 16}
snake_case_ = do_normalize
snake_case_ = do_convert_rgb
snake_case_ = max_patches
snake_case_ = is_vqa
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, **lowerCAmelCase__) -> np.ndarray:
requires_backends(self.extract_flattened_patches, 'torch')
_check_torch_version()
# convert to torch
snake_case_ = to_channel_dimension_format(lowerCAmelCase__, ChannelDimension.FIRST)
snake_case_ = torch.from_numpy(lowerCAmelCase__)
snake_case_ , snake_case_ = patch_size['height'], patch_size['width']
snake_case_ , snake_case_ = get_image_size(lowerCAmelCase__)
# maximize scale s.t. the resulting number of patches (rows * columns) does not exceed max_patches
snake_case_ = math.sqrt(max_patches * (patch_height / image_height) * (patch_width / image_width))
snake_case_ = max(min(math.floor(scale * image_height / patch_height), lowerCAmelCase__), 1)
snake_case_ = max(min(math.floor(scale * image_width / patch_width), lowerCAmelCase__), 1)
snake_case_ = max(num_feasible_rows * patch_height, 1)
snake_case_ = max(num_feasible_cols * patch_width, 1)
snake_case_ = torch.nn.functional.interpolate(
image.unsqueeze(0), size=(resized_height, resized_width), mode='bilinear', align_corners=lowerCAmelCase__, antialias=lowerCAmelCase__, ).squeeze(0)
# [1, rows, columns, patch_height * patch_width * image_channels]
snake_case_ = torch_extract_patches(lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = patches.shape
snake_case_ = patches_shape[1]
snake_case_ = patches_shape[2]
snake_case_ = patches_shape[3]
# [rows * columns, patch_height * patch_width * image_channels]
snake_case_ = patches.reshape([rows * columns, depth])
# [rows * columns, 1]
snake_case_ = torch.arange(lowerCAmelCase__).reshape([rows, 1]).repeat(1, lowerCAmelCase__).reshape([rows * columns, 1])
snake_case_ = torch.arange(lowerCAmelCase__).reshape([1, columns]).repeat(lowerCAmelCase__, 1).reshape([rows * columns, 1])
# Offset by 1 so the ids do not contain zeros, which represent padding.
row_ids += 1
col_ids += 1
# Prepare additional patch features.
# [rows * columns, 1]
snake_case_ = row_ids.to(torch.floataa)
snake_case_ = col_ids.to(torch.floataa)
# [rows * columns, 2 + patch_height * patch_width * image_channels]
snake_case_ = torch.cat([row_ids, col_ids, patches], -1)
# [max_patches, 2 + patch_height * patch_width * image_channels]
snake_case_ = torch.nn.functional.pad(lowerCAmelCase__, [0, 0, 0, max_patches - (rows * columns)]).float()
snake_case_ = to_numpy_array(lowerCAmelCase__)
return result
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None, **lowerCAmelCase__) -> np.ndarray:
if image.dtype == np.uinta:
snake_case_ = image.astype(np.floataa)
# take mean across the whole `image`
snake_case_ = np.mean(lowerCAmelCase__)
snake_case_ = np.std(lowerCAmelCase__)
snake_case_ = max(lowerCAmelCase__, 1.0 / math.sqrt(np.prod(image.shape)))
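# Per-image standardization: the std is floored at 1 / sqrt(number of elements), as in
# tf.image.per_image_standardization, so nearly-constant images do not explode the division.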
return normalize(lowerCAmelCase__, mean=lowerCAmelCase__, std=lowerCAmelCase__, **lowerCAmelCase__)
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__ = ChannelDimension.FIRST, **lowerCAmelCase__, ) -> ImageInput:
snake_case_ = do_normalize if do_normalize is not None else self.do_normalize
snake_case_ = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
snake_case_ = patch_size if patch_size is not None else self.patch_size
snake_case_ = max_patches if max_patches is not None else self.max_patches
snake_case_ = self.is_vqa
if kwargs.get('data_format', lowerCAmelCase__) is not None:
raise ValueError('data_format is not an accepted input as the outputs are always flattened patches.')
snake_case_ = make_list_of_images(lowerCAmelCase__)
if not valid_images(lowerCAmelCase__):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.')
# PIL RGBA images are converted to RGB
if do_convert_rgb:
snake_case_ = [convert_to_rgb(lowerCAmelCase__) for image in images]
# All transformations expect numpy arrays.
snake_case_ = [to_numpy_array(lowerCAmelCase__) for image in images]
if is_vqa:
if header_text is None:
raise ValueError('A header text must be provided for VQA models.')
snake_case_ = kwargs.pop('font_bytes', lowerCAmelCase__)
snake_case_ = kwargs.pop('font_path', lowerCAmelCase__)
if isinstance(lowerCAmelCase__, lowerCAmelCase__):
snake_case_ = [header_text] * len(lowerCAmelCase__)
snake_case_ = [
render_header(lowerCAmelCase__, header_text[i], font_bytes=lowerCAmelCase__, font_path=lowerCAmelCase__)
for i, image in enumerate(lowerCAmelCase__)
]
if do_normalize:
snake_case_ = [self.normalize(image=lowerCAmelCase__) for image in images]
# convert to torch tensor and permute
snake_case_ = [
self.extract_flattened_patches(image=lowerCAmelCase__, max_patches=lowerCAmelCase__, patch_size=lowerCAmelCase__)
for image in images
]
# create attention mask in numpy
snake_case_ = [(image.sum(axis=-1) != 0).astype(np.floataa) for image in images]
snake_case_ = BatchFeature(
data={'flattened_patches': images, 'attention_mask': attention_masks}, tensor_type=lowerCAmelCase__)
return encoded_outputs
| 312
| 0
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCamelCase = logging.get_logger(__name__)
__UpperCamelCase = {
'''EleutherAI/gpt-neox-20b''': '''https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json''',
# See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}
class UpperCamelCase ( lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE_ = "gpt_neox"
def __init__( self, lowerCAmelCase__=5_0432, lowerCAmelCase__=6144, lowerCAmelCase__=44, lowerCAmelCase__=64, lowerCAmelCase__=2_4576, lowerCAmelCase__="gelu", lowerCAmelCase__=0.25, lowerCAmelCase__=1_0000, lowerCAmelCase__=0.0, lowerCAmelCase__=0.0, lowerCAmelCase__=0.1, lowerCAmelCase__=2048, lowerCAmelCase__=0.02, lowerCAmelCase__=1e-5, lowerCAmelCase__=True, lowerCAmelCase__=0, lowerCAmelCase__=2, lowerCAmelCase__=False, lowerCAmelCase__=True, lowerCAmelCase__=None, **lowerCAmelCase__, ) -> Any:
super().__init__(bos_token_id=lowerCAmelCase__, eos_token_id=lowerCAmelCase__, **lowerCAmelCase__)
snake_case_ = vocab_size
snake_case_ = max_position_embeddings
snake_case_ = hidden_size
snake_case_ = num_hidden_layers
snake_case_ = num_attention_heads
snake_case_ = intermediate_size
snake_case_ = hidden_act
snake_case_ = rotary_pct
snake_case_ = rotary_emb_base
snake_case_ = attention_dropout
snake_case_ = hidden_dropout
snake_case_ = classifier_dropout
snake_case_ = initializer_range
snake_case_ = layer_norm_eps
snake_case_ = use_cache
snake_case_ = tie_word_embeddings
snake_case_ = use_parallel_residual
snake_case_ = rope_scaling
self._rope_scaling_validation()
if self.hidden_size % self.num_attention_heads != 0:
raise ValueError(
                'The hidden size is not divisible by the number of attention heads! Make sure to update them!')
    def a_ ( self) -> Tuple:
        if self.rope_scaling is None:
            return
        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                '`rope_scaling` must be a dictionary with two fields, `type` and `factor`, '
                f'got {self.rope_scaling}')
        rope_scaling_type = self.rope_scaling.get('type', None)
        rope_scaling_factor = self.rope_scaling.get('factor', None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f'`rope_scaling`\'s type field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}')
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f'`rope_scaling`\'s factor field must be a float > 1, got {rope_scaling_factor}')
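# Illustrative aside: the contract enforced by the validation method above.
# Accepted:  {"type": "linear", "factor": 2.0}  or  {"type": "dynamic", "factor": 4.0}
# Rejected:  {"type": "linear"}               (missing `factor`)
#            {"type": "ntk", "factor": 2.0}   (unknown scaling type)
#            {"type": "linear", "factor": 1}  (factor must be a float > 1)
# The sample values are invented for illustration only.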
"""simple docstring"""
from math import pi
def arc_length(angle: float, radius: float) -> float:
return 2 * pi * radius * (angle / 360)
if __name__ == "__main__":
print(arc_length(90, 10))
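    # Sanity check: a 90-degree arc is a quarter of the circumference
    # 2 * pi * 10, i.e. 5 * pi ~= 15.708.
    assert abs(arc_length(90, 10) - 5 * pi) < 1e-12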
"""simple docstring"""
import os
import re
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__UpperCamelCase = logging.get_logger(__name__)
__UpperCamelCase = {'''vocab_file''': '''spiece.model'''}
__UpperCamelCase = {
'''vocab_file''': {
'''google/bigbird-roberta-base''': '''https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model''',
'''google/bigbird-roberta-large''': (
'''https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model'''
),
'''google/bigbird-base-trivia-itc''': (
'''https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model'''
),
}
}
__UpperCamelCase = {
'''google/bigbird-roberta-base''': 4096,
'''google/bigbird-roberta-large''': 4096,
'''google/bigbird-base-trivia-itc''': 4096,
}
class UpperCamelCase ( lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE_ = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE_ = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE_ = ["input_ids", "attention_mask"]
SCREAMING_SNAKE_CASE_ = []
def __init__( self, lowerCAmelCase__, lowerCAmelCase__="<unk>", lowerCAmelCase__="<s>", lowerCAmelCase__="</s>", lowerCAmelCase__="<pad>", lowerCAmelCase__="[SEP]", lowerCAmelCase__="[MASK]", lowerCAmelCase__="[CLS]", lowerCAmelCase__ = None, **lowerCAmelCase__, ) -> None:
snake_case_ = AddedToken(lowerCAmelCase__, lstrip=lowerCAmelCase__, rstrip=lowerCAmelCase__) if isinstance(lowerCAmelCase__, lowerCAmelCase__) else bos_token
snake_case_ = AddedToken(lowerCAmelCase__, lstrip=lowerCAmelCase__, rstrip=lowerCAmelCase__) if isinstance(lowerCAmelCase__, lowerCAmelCase__) else eos_token
snake_case_ = AddedToken(lowerCAmelCase__, lstrip=lowerCAmelCase__, rstrip=lowerCAmelCase__) if isinstance(lowerCAmelCase__, lowerCAmelCase__) else unk_token
snake_case_ = AddedToken(lowerCAmelCase__, lstrip=lowerCAmelCase__, rstrip=lowerCAmelCase__) if isinstance(lowerCAmelCase__, lowerCAmelCase__) else pad_token
snake_case_ = AddedToken(lowerCAmelCase__, lstrip=lowerCAmelCase__, rstrip=lowerCAmelCase__) if isinstance(lowerCAmelCase__, lowerCAmelCase__) else cls_token
snake_case_ = AddedToken(lowerCAmelCase__, lstrip=lowerCAmelCase__, rstrip=lowerCAmelCase__) if isinstance(lowerCAmelCase__, lowerCAmelCase__) else sep_token
# Mask token behave like a normal word, i.e. include the space before it
snake_case_ = AddedToken(lowerCAmelCase__, lstrip=lowerCAmelCase__, rstrip=lowerCAmelCase__) if isinstance(lowerCAmelCase__, lowerCAmelCase__) else mask_token
snake_case_ = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=lowerCAmelCase__, eos_token=lowerCAmelCase__, unk_token=lowerCAmelCase__, pad_token=lowerCAmelCase__, sep_token=lowerCAmelCase__, mask_token=lowerCAmelCase__, cls_token=lowerCAmelCase__, sp_model_kwargs=self.sp_model_kwargs, **lowerCAmelCase__, )
snake_case_ = vocab_file
snake_case_ = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(lowerCAmelCase__)
@property
def a_ ( self) -> Dict:
return self.sp_model.get_piece_size()
def a_ ( self) -> Optional[Any]:
        snake_case_ = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
def __getstate__( self) -> int:
snake_case_ = self.__dict__.copy()
snake_case_ = None
return state
def __setstate__( self, lowerCAmelCase__) -> Dict:
snake_case_ = d
# for backward compatibility
if not hasattr(self, 'sp_model_kwargs'):
snake_case_ = {}
snake_case_ = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(self.vocab_file)
def a_ ( self, lowerCAmelCase__) -> List[str]:
return self.sp_model.encode(lowerCAmelCase__, out_type=lowerCAmelCase__)
def a_ ( self, lowerCAmelCase__) -> Union[str, Any]:
return self.sp_model.piece_to_id(lowerCAmelCase__)
def a_ ( self, lowerCAmelCase__) -> Dict:
snake_case_ = self.sp_model.IdToPiece(lowerCAmelCase__)
return token
def a_ ( self, lowerCAmelCase__) -> Union[str, Any]:
snake_case_ = []
snake_case_ = ''
snake_case_ = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(lowerCAmelCase__) + token
snake_case_ = True
snake_case_ = []
else:
current_sub_tokens.append(lowerCAmelCase__)
snake_case_ = False
out_string += self.sp_model.decode(lowerCAmelCase__)
return out_string.strip()
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = False, lowerCAmelCase__ = None, lowerCAmelCase__ = True, **lowerCAmelCase__, ) -> str:
snake_case_ = kwargs.pop('use_source_tokenizer', lowerCAmelCase__)
snake_case_ = self.convert_ids_to_tokens(lowerCAmelCase__, skip_special_tokens=lowerCAmelCase__)
# To avoid mixing byte-level and unicode for byte-level BPT
# we need to build string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
snake_case_ = []
snake_case_ = []
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(lowerCAmelCase__))
snake_case_ = []
sub_texts.append(lowerCAmelCase__)
else:
current_sub_text.append(lowerCAmelCase__)
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(lowerCAmelCase__))
# Mimic the behavior of the Rust tokenizer:
# No space before [MASK] and [SEP]
if spaces_between_special_tokens:
snake_case_ = re.sub(R' (\[(MASK|SEP)\])', R'\1', ' '.join(lowerCAmelCase__))
else:
snake_case_ = ''.join(lowerCAmelCase__)
snake_case_ = (
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
snake_case_ = self.clean_up_tokenization(lowerCAmelCase__)
return clean_text
else:
return text
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None) -> Tuple[str]:
if not os.path.isdir(lowerCAmelCase__):
logger.error(f'Vocabulary path ({save_directory}) should be a directory')
return
snake_case_ = os.path.join(
lowerCAmelCase__, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
if os.path.abspath(self.vocab_file) != os.path.abspath(lowerCAmelCase__) and os.path.isfile(self.vocab_file):
copyfile(self.vocab_file, lowerCAmelCase__)
elif not os.path.isfile(self.vocab_file):
with open(lowerCAmelCase__, 'wb') as fi:
snake_case_ = self.sp_model.serialized_model_proto()
fi.write(lowerCAmelCase__)
return (out_vocab_file,)
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
snake_case_ = [self.cls_token_id]
snake_case_ = [self.sep_token_id]
return cls + token_ids_a + sep + token_ids_a + sep
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None, lowerCAmelCase__ = False) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCAmelCase__, token_ids_a=lowerCAmelCase__, already_has_special_tokens=lowerCAmelCase__)
if token_ids_a is None:
return [1] + ([0] * len(lowerCAmelCase__)) + [1]
return [1] + ([0] * len(lowerCAmelCase__)) + [1] + ([0] * len(lowerCAmelCase__)) + [1]
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None) -> List[int]:
snake_case_ = [self.sep_token_id]
snake_case_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1]
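# Illustrative aside: the special-token layouts the three methods above produce.
#   single sequence:  [CLS] A [SEP]            token_type_ids: all 0
#   sequence pair:    [CLS] A [SEP] B [SEP]    token_type_ids: 0s through the
#                                              first [SEP], 1s afterwards
# For example, with invented ids cls=65, sep=66, the pair ([1, 2], [3]) becomes
# [65, 1, 2, 66, 3, 66] with token_type_ids [0, 0, 0, 0, 1, 1].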
"""simple docstring"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging
__UpperCamelCase = logging.get_logger(__name__)
__UpperCamelCase = {
'''CarlCochet/trajectory-transformer-halfcheetah-medium-v2''': (
'''https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json'''
),
# See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer
}
class UpperCamelCase ( lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE_ = "trajectory_transformer"
SCREAMING_SNAKE_CASE_ = ["past_key_values"]
SCREAMING_SNAKE_CASE_ = {
"hidden_size": "n_embd",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self, lowerCAmelCase__=100, lowerCAmelCase__=5, lowerCAmelCase__=1, lowerCAmelCase__=1, lowerCAmelCase__=249, lowerCAmelCase__=6, lowerCAmelCase__=17, lowerCAmelCase__=25, lowerCAmelCase__=4, lowerCAmelCase__=4, lowerCAmelCase__=128, lowerCAmelCase__=0.1, lowerCAmelCase__=0.1, lowerCAmelCase__=0.1, lowerCAmelCase__=0.0006, lowerCAmelCase__=512, lowerCAmelCase__=0.02, lowerCAmelCase__=1e-12, lowerCAmelCase__=1, lowerCAmelCase__=True, lowerCAmelCase__=1, lowerCAmelCase__=5_0256, lowerCAmelCase__=5_0256, **lowerCAmelCase__, ) -> Optional[Any]:
snake_case_ = vocab_size
snake_case_ = action_weight
snake_case_ = reward_weight
snake_case_ = value_weight
snake_case_ = max_position_embeddings
snake_case_ = block_size
snake_case_ = action_dim
snake_case_ = observation_dim
snake_case_ = transition_dim
snake_case_ = learning_rate
snake_case_ = n_layer
snake_case_ = n_head
snake_case_ = n_embd
snake_case_ = embd_pdrop
snake_case_ = attn_pdrop
snake_case_ = resid_pdrop
snake_case_ = initializer_range
snake_case_ = layer_norm_eps
snake_case_ = kaiming_initializer_range
snake_case_ = use_cache
super().__init__(pad_token_id=lowerCAmelCase__, bos_token_id=lowerCAmelCase__, eos_token_id=lowerCAmelCase__, **lowerCAmelCase__)
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def newton_raphson( function: str , starting_point: complex , variable: str = "x" , precision: float = 10**-10 , multiplicity: int = 1 , ) -> complex:
    x = symbols(variable )
    func = lambdify(x , function )
    diff_function = lambdify(x , diff(function , x ) )
    prev_guess = starting_point
    while True:
        if diff_function(prev_guess ) != 0:
            next_guess = prev_guess - multiplicity * func(prev_guess ) / diff_function(
                prev_guess )
        else:
            raise ZeroDivisionError('Could not find root' ) from None
        # Precision is checked by comparing the difference of consecutive guesses
        if abs(next_guess - prev_guess ) < precision:
            return next_guess
        prev_guess = next_guess
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(F"""The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}""")
# Find root of polynomial
# Find fourth Root of 5
print(F"""The root of x**4 - 5 = 0 is {newton_raphson("x**4 -5", 0.4 +5j)}""")
# Find value of e
print(
'''The root of log(y) - 1 = 0 is ''',
F"""{newton_raphson("log(y) - 1", 2, variable="y")}""",
)
# Exponential Roots
print(
'''The root of exp(x) - 1 = 0 is''',
F"""{newton_raphson("exp(x) - 1", 10, precision=0.005)}""",
)
# Find root of cos(x)
print(F"""The root of cos(x) = 0 is {newton_raphson("cos(x)", 0)}""")
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class UpperCamelCase ( metaclass=lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE_ = ["keras_nlp"]
def __init__( self, *lowerCAmelCase__, **lowerCAmelCase__) -> int:
requires_backends(self, ['keras_nlp'])
"""simple docstring"""
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
__UpperCamelCase = pytest.mark.integration
@pytest.mark.parametrize('path' , ['paws', 'csv'] )
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> List[Any]:
inspect_dataset(UpperCAmelCase , UpperCAmelCase )
snake_case_ = path + '.py'
assert script_name in os.listdir(UpperCAmelCase )
assert "__pycache__" not in os.listdir(UpperCAmelCase )
@pytest.mark.filterwarnings('ignore:inspect_metric is deprecated:FutureWarning' )
@pytest.mark.filterwarnings('ignore:metric_module_factory is deprecated:FutureWarning' )
@pytest.mark.parametrize('path' , ['accuracy'] )
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> str:
inspect_metric(UpperCAmelCase , UpperCAmelCase )
snake_case_ = path + '.py'
assert script_name in os.listdir(UpperCAmelCase )
assert "__pycache__" not in os.listdir(UpperCAmelCase )
@pytest.mark.parametrize(
'path, config_name, expected_splits' , [
('squad', 'plain_text', ['train', 'validation']),
('dalle-mini/wit', 'dalle-mini--wit', ['train']),
('paws', 'labeled_final', ['train', 'test', 'validation']),
] , )
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> List[Any]:
snake_case_ = get_dataset_config_info(UpperCAmelCase , config_name=UpperCAmelCase )
assert info.config_name == config_name
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
'path, config_name, expected_exception' , [
('paws', None, ValueError),
] , )
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Optional[int]:
with pytest.raises(UpperCAmelCase ):
get_dataset_config_info(UpperCAmelCase , config_name=UpperCAmelCase )
@pytest.mark.parametrize(
'path, expected' , [
('squad', 'plain_text'),
('acronym_identification', 'default'),
('lhoestq/squad', 'plain_text'),
('lhoestq/test', 'default'),
('lhoestq/demo1', 'lhoestq--demo1'),
('dalle-mini/wit', 'dalle-mini--wit'),
] , )
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> str:
snake_case_ = get_dataset_config_names(UpperCAmelCase )
assert expected in config_names
@pytest.mark.parametrize(
'path, expected_configs, expected_splits_in_first_config' , [
('squad', ['plain_text'], ['train', 'validation']),
('dalle-mini/wit', ['dalle-mini--wit'], ['train']),
('paws', ['labeled_final', 'labeled_swap', 'unlabeled_final'], ['train', 'test', 'validation']),
] , )
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Optional[Any]:
snake_case_ = get_dataset_infos(UpperCAmelCase )
assert list(infos.keys() ) == expected_configs
snake_case_ = expected_configs[0]
assert expected_config in infos
snake_case_ = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits_in_first_config
@pytest.mark.parametrize(
'path, expected_config, expected_splits' , [
('squad', 'plain_text', ['train', 'validation']),
('dalle-mini/wit', 'dalle-mini--wit', ['train']),
('paws', 'labeled_final', ['train', 'test', 'validation']),
] , )
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Tuple:
snake_case_ = get_dataset_infos(UpperCAmelCase )
assert expected_config in infos
snake_case_ = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
'path, config_name, expected_exception' , [
('paws', None, ValueError),
] , )
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Union[str, Any]:
with pytest.raises(UpperCAmelCase ):
get_dataset_split_names(UpperCAmelCase , config_name=UpperCAmelCase )
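# Illustrative aside: the same lookups outside pytest (network access and the
# hub entries exercised above are assumed to be available):
#
#   from datasets import get_dataset_split_names
#   get_dataset_split_names("squad", config_name="plain_text")
#   # -> ['train', 'validation']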
"""simple docstring"""
import os
import numpy
import onnx
def _is_equal_tensor_proto(a , b ) -> bool:
    name_a = a.name
    name_b = b.name
    # Compare the protos with names blanked out, then restore the names
    a.name = ''
    b.name = ''
    res = a == b
    a.name = name_a
    b.name = name_b
    return res
def _node_replace_input_with(node_proto , name , new_name ) -> None:
    for i, input_name in enumerate(node_proto.input ):
        if input_name == name:
            node_proto.input.insert(i , new_name )
            node_proto.input.pop(i + 1 )
    if node_proto.op_type == "If":
        _graph_replace_input_with(node_proto.attribute[0].g , name , new_name )
        _graph_replace_input_with(node_proto.attribute[1].g , name , new_name )
    if node_proto.op_type == "Loop":
        _graph_replace_input_with(node_proto.attribute[0].g , name , new_name )
def _graph_replace_input_with(graph_proto , name , new_name ) -> None:
    for n in graph_proto.node:
        _node_replace_input_with(n , name , new_name )
def _remove_dup_initializers_from_model(model , model_without_ext , ind_to_replace ) -> None:
    inits_with_data = list(model.graph.initializer )
    inits = list(model_without_ext.graph.initializer )
    for i, ref_i in ind_to_replace:
        assert inits_with_data[i].name == inits[i].name
        assert inits_with_data[ref_i].name == inits[ref_i].name
        assert i > ref_i
        name_i = inits[i].name
        name_ref = inits[ref_i].name
        model_without_ext.graph.initializer.remove(inits[i] )
        # rewire every node that consumed the removed initializer
        _graph_replace_input_with(model_without_ext.graph , name_i , name_ref )
def remove_dup_initializers(onnx_file_path ):
    model_file_folder = os.path.dirname(onnx_file_path )
    model_file_name = os.path.basename(onnx_file_path )
    model = onnx.load(os.path.join(model_file_folder , model_file_name ) )
    inits = list(model.graph.initializer )
    dup_set = set()
    dup_map = {}
    ind_to_replace = []
    total_reduced_size = 0
    for i in range(len(inits ) ):
        if i in dup_set:
            continue
        for j in range(i + 1 , len(inits ) ):
            if j in dup_set:
                continue
            if _is_equal_tensor_proto(inits[i] , inits[j] ):
                dup_set.add(i )
                dup_set.add(j )
                dtype = inits[j].data_type
                mem_size = numpy.prod(inits[j].dims )
                if dtype == 1:  # FLOAT
                    mem_size *= 4
                elif dtype == 6:  # INT32
                    mem_size *= 4
                elif dtype == 7 or dtype == 11:  # INT64 or DOUBLE
                    mem_size *= 8
                else:
                    print('unexpected data type: ' , dtype )
                total_reduced_size += mem_size
                name_i = inits[i].name
                name_j = inits[j].name
                if name_i in dup_map:
                    dup_map[name_i].append(name_j )
                else:
                    dup_map[name_i] = [name_j]
                ind_to_replace.append((j, i) )
    print('total reduced size: ' , total_reduced_size / 1024 / 1024 / 1024 , 'GB' )
    ind_to_replace = sorted(ind_to_replace )
    _remove_dup_initializers_from_model(model , model , ind_to_replace )
    optimized_model_file_name = 'optimized_' + model_file_name
    new_model = os.path.join(model_file_folder , optimized_model_file_name )
    onnx.save(model , new_model )
    return new_model
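# Hypothetical invocation sketch (the path is an assumption): calling
#
#   remove_dup_initializers("/tmp/encoder.onnx")
#
# deduplicates identical initializer tensors, rewires nodes to the surviving
# copies, writes "/tmp/optimized_encoder.onnx", and returns that path.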
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class UpperCamelCase :
SCREAMING_SNAKE_CASE_ = LEDConfig
SCREAMING_SNAKE_CASE_ = {}
SCREAMING_SNAKE_CASE_ = "gelu"
def __init__( self, lowerCAmelCase__, lowerCAmelCase__=13, lowerCAmelCase__=7, lowerCAmelCase__=True, lowerCAmelCase__=False, lowerCAmelCase__=99, lowerCAmelCase__=32, lowerCAmelCase__=2, lowerCAmelCase__=4, lowerCAmelCase__=37, lowerCAmelCase__=0.1, lowerCAmelCase__=0.1, lowerCAmelCase__=20, lowerCAmelCase__=2, lowerCAmelCase__=1, lowerCAmelCase__=0, lowerCAmelCase__=4, ) -> Optional[Any]:
snake_case_ = parent
snake_case_ = batch_size
snake_case_ = seq_length
snake_case_ = is_training
snake_case_ = use_labels
snake_case_ = vocab_size
snake_case_ = hidden_size
snake_case_ = num_hidden_layers
snake_case_ = num_attention_heads
snake_case_ = intermediate_size
snake_case_ = hidden_dropout_prob
snake_case_ = attention_probs_dropout_prob
snake_case_ = max_position_embeddings
snake_case_ = eos_token_id
snake_case_ = pad_token_id
snake_case_ = bos_token_id
snake_case_ = attention_window
# `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
# [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
# returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
# because its local attention only attends to `self.attention_window` and one before and one after
snake_case_ = self.attention_window + 2
# because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for
# the `test_attention_outputs` and `test_hidden_states_output` tests
snake_case_ = (
self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
)
def a_ ( self) -> Dict:
snake_case_ = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
snake_case_ = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
snake_case_ = tf.concat([input_ids, eos_tensor], axis=1)
snake_case_ = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
snake_case_ = self.config_cls(
vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, attention_window=self.attention_window, **self.config_updates, )
snake_case_ = prepare_led_inputs_dict(lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = tf.concat(
[tf.zeros_like(lowerCAmelCase__)[:, :-1], tf.ones_like(lowerCAmelCase__)[:, -1:]], axis=-1, )
snake_case_ = global_attention_mask
return config, inputs_dict
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__) -> Any:
snake_case_ = TFLEDModel(config=lowerCAmelCase__).get_decoder()
snake_case_ = inputs_dict['input_ids']
snake_case_ = input_ids[:1, :]
snake_case_ = inputs_dict['attention_mask'][:1, :]
snake_case_ = 1
# first forward pass
snake_case_ = model(lowerCAmelCase__, attention_mask=lowerCAmelCase__, use_cache=lowerCAmelCase__)
snake_case_ , snake_case_ = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
snake_case_ = ids_tensor((self.batch_size, 3), config.vocab_size)
        snake_case_ = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)
        # append to next input_ids and attention_mask
snake_case_ = tf.concat([input_ids, next_tokens], axis=-1)
snake_case_ = tf.concat([attention_mask, next_attn_mask], axis=-1)
snake_case_ = model(lowerCAmelCase__, attention_mask=lowerCAmelCase__)[0]
snake_case_ = model(lowerCAmelCase__, attention_mask=lowerCAmelCase__, past_key_values=lowerCAmelCase__)[0]
self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])
# select random slice
snake_case_ = int(ids_tensor((1,), output_from_past.shape[-1]))
snake_case_ = output_from_no_past[:, -3:, random_slice_idx]
snake_case_ = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(lowerCAmelCase__, lowerCAmelCase__, rtol=1e-3)
def prepare_led_inputs_dict( config , input_ids , decoder_input_ids , attention_mask=None , decoder_attention_mask=None , head_mask=None , decoder_head_mask=None , ) -> Any:
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids , config.pad_token_id ) , tf.int8 )
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.int8 ),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.int8 ),
            ] , axis=-1 , )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"attention_mask": attention_mask,
"decoder_input_ids": decoder_input_ids,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
}
@require_tf
class UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ):
SCREAMING_SNAKE_CASE_ = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
SCREAMING_SNAKE_CASE_ = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
SCREAMING_SNAKE_CASE_ = (
{
"conversational": TFLEDForConditionalGeneration,
"feature-extraction": TFLEDModel,
"summarization": TFLEDForConditionalGeneration,
"text2text-generation": TFLEDForConditionalGeneration,
"translation": TFLEDForConditionalGeneration,
}
if is_tf_available()
else {}
)
SCREAMING_SNAKE_CASE_ = True
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = False
def a_ ( self) -> Any:
snake_case_ = TFLEDModelTester(self)
snake_case_ = ConfigTester(self, config_class=lowerCAmelCase__)
def a_ ( self) -> Optional[Any]:
self.config_tester.run_common_tests()
def a_ ( self) -> Union[str, Any]:
snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*lowerCAmelCase__)
def a_ ( self) -> int:
snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
snake_case_ = tf.zeros_like(inputs_dict['attention_mask'])
snake_case_ = 2
snake_case_ = tf.where(
tf.range(self.model_tester.seq_length)[None, :] < num_global_attn_indices, 1, inputs_dict['global_attention_mask'], )
snake_case_ = True
snake_case_ = self.model_tester.seq_length
snake_case_ = self.model_tester.encoder_seq_length
def check_decoder_attentions_output(lowerCAmelCase__):
snake_case_ = outputs.decoder_attentions
self.assertEqual(len(lowerCAmelCase__), self.model_tester.num_hidden_layers)
self.assertListEqual(
list(decoder_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, seq_length, seq_length], )
def check_encoder_attentions_output(lowerCAmelCase__):
snake_case_ = [t.numpy() for t in outputs.encoder_attentions]
snake_case_ = [t.numpy() for t in outputs.encoder_global_attentions]
self.assertEqual(len(lowerCAmelCase__), self.model_tester.num_hidden_layers)
self.assertEqual(len(lowerCAmelCase__), self.model_tester.num_hidden_layers)
self.assertListEqual(
list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, seq_length, seq_length], )
self.assertListEqual(
list(global_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices], )
for model_class in self.all_model_classes:
snake_case_ = True
snake_case_ = False
snake_case_ = False
snake_case_ = model_class(lowerCAmelCase__)
snake_case_ = model(self._prepare_for_class(lowerCAmelCase__, lowerCAmelCase__))
snake_case_ = len(lowerCAmelCase__)
self.assertEqual(config.output_hidden_states, lowerCAmelCase__)
check_encoder_attentions_output(lowerCAmelCase__)
if self.is_encoder_decoder:
snake_case_ = model_class(lowerCAmelCase__)
snake_case_ = model(self._prepare_for_class(lowerCAmelCase__, lowerCAmelCase__))
self.assertEqual(config.output_hidden_states, lowerCAmelCase__)
check_decoder_attentions_output(lowerCAmelCase__)
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
snake_case_ = True
snake_case_ = model_class(lowerCAmelCase__)
snake_case_ = model(self._prepare_for_class(lowerCAmelCase__, lowerCAmelCase__))
self.assertEqual(config.output_hidden_states, lowerCAmelCase__)
check_encoder_attentions_output(lowerCAmelCase__)
# Check attention is always last and order is fine
snake_case_ = True
snake_case_ = True
snake_case_ = model_class(lowerCAmelCase__)
snake_case_ = model(self._prepare_for_class(lowerCAmelCase__, lowerCAmelCase__))
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(lowerCAmelCase__))
self.assertEqual(model.config.output_hidden_states, lowerCAmelCase__)
check_encoder_attentions_output(lowerCAmelCase__)
@unittest.skip('LED keeps using potentially symbolic tensors in conditionals and breaks tracing.')
def a_ ( self) -> Dict:
pass
def a_ ( self) -> int:
# TODO: Head-masking not yet implement
pass
def _long_tensor(tok_lst ):
    return tf.constant(tok_lst , dtype=tf.int32 )
__UpperCamelCase = 1E-4
@slow
@require_tf
class UpperCamelCase ( unittest.TestCase ):
def a_ ( self) -> int:
snake_case_ = TFLEDForConditionalGeneration.from_pretrained('allenai/led-base-16384').led
# change to intended input here
snake_case_ = _long_tensor([512 * [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69]])
snake_case_ = _long_tensor([128 * [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69]])
snake_case_ = prepare_led_inputs_dict(model.config, lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = model(**lowerCAmelCase__)[0]
snake_case_ = (1, 1024, 768)
self.assertEqual(output.shape, lowerCAmelCase__)
# change to expected output here
snake_case_ = tf.convert_to_tensor(
[[2.3050, 2.8279, 0.6531], [-1.8457, -0.1455, -3.5661], [-1.0186, 0.4586, -2.2043]], )
tf.debugging.assert_near(output[:, :3, :3], lowerCAmelCase__, atol=1e-3)
def a_ ( self) -> List[str]:
snake_case_ = TFLEDForConditionalGeneration.from_pretrained('allenai/led-base-16384')
# change to intended input here
snake_case_ = _long_tensor([512 * [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69]])
snake_case_ = _long_tensor([128 * [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69]])
snake_case_ = prepare_led_inputs_dict(model.config, lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = model(**lowerCAmelCase__)[0]
snake_case_ = (1, 1024, model.config.vocab_size)
self.assertEqual(output.shape, lowerCAmelCase__)
# change to expected output here
snake_case_ = tf.convert_to_tensor(
[[33.6507, 6.4572, 16.8089], [5.8739, -2.4238, 11.2902], [-3.2139, -4.3149, 4.2783]], )
tf.debugging.assert_near(output[:, :3, :3], lowerCAmelCase__, atol=1e-3, rtol=1e-3)
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_barthez import BarthezTokenizer
else:
__UpperCamelCase = None
__UpperCamelCase = logging.get_logger(__name__)
__UpperCamelCase = {'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''}
__UpperCamelCase = {
'''vocab_file''': {
'''moussaKam/mbarthez''': '''https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model''',
'''moussaKam/barthez''': '''https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model''',
'''moussaKam/barthez-orangesum-title''': (
'''https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model'''
),
},
'''tokenizer_file''': {
'''moussaKam/mbarthez''': '''https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json''',
'''moussaKam/barthez''': '''https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json''',
'''moussaKam/barthez-orangesum-title''': (
'''https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json'''
),
},
}
__UpperCamelCase = {
'''moussaKam/mbarthez''': 1024,
'''moussaKam/barthez''': 1024,
'''moussaKam/barthez-orangesum-title''': 1024,
}
__UpperCamelCase = '''▁'''
class UpperCamelCase ( lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE_ = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE_ = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE_ = ["input_ids", "attention_mask"]
SCREAMING_SNAKE_CASE_ = BarthezTokenizer
def __init__( self, lowerCAmelCase__=None, lowerCAmelCase__=None, lowerCAmelCase__="<s>", lowerCAmelCase__="</s>", lowerCAmelCase__="</s>", lowerCAmelCase__="<s>", lowerCAmelCase__="<unk>", lowerCAmelCase__="<pad>", lowerCAmelCase__="<mask>", **lowerCAmelCase__, ) -> List[str]:
# Mask token behave like a normal word, i.e. include the space before it
snake_case_ = AddedToken(lowerCAmelCase__, lstrip=lowerCAmelCase__, rstrip=lowerCAmelCase__) if isinstance(lowerCAmelCase__, lowerCAmelCase__) else mask_token
super().__init__(
lowerCAmelCase__, tokenizer_file=lowerCAmelCase__, bos_token=lowerCAmelCase__, eos_token=lowerCAmelCase__, unk_token=lowerCAmelCase__, sep_token=lowerCAmelCase__, cls_token=lowerCAmelCase__, pad_token=lowerCAmelCase__, mask_token=lowerCAmelCase__, **lowerCAmelCase__, )
snake_case_ = vocab_file
snake_case_ = False if not self.vocab_file else True
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
snake_case_ = [self.cls_token_id]
snake_case_ = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None) -> List[int]:
snake_case_ = [self.sep_token_id]
snake_case_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None) -> Tuple[str]:
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.')
if not os.path.isdir(lowerCAmelCase__):
logger.error(f'Vocabulary path ({save_directory}) should be a directory')
return
snake_case_ = os.path.join(
lowerCAmelCase__, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
if os.path.abspath(self.vocab_file) != os.path.abspath(lowerCAmelCase__):
copyfile(self.vocab_file, lowerCAmelCase__)
return (out_vocab_file,)
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class UpperCamelCase ( unittest.TestCase ):
def a_ ( self) -> List[Any]:
snake_case_ = tempfile.mkdtemp()
# fmt: off
snake_case_ = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>']
# fmt: on
snake_case_ = dict(zip(lowerCAmelCase__, range(len(lowerCAmelCase__))))
snake_case_ = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>', '']
snake_case_ = {'unk_token': '<unk>'}
snake_case_ = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
snake_case_ = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['merges_file'])
with open(self.vocab_file, 'w', encoding='utf-8') as fp:
fp.write(json.dumps(lowerCAmelCase__) + '\n')
with open(self.merges_file, 'w', encoding='utf-8') as fp:
fp.write('\n'.join(lowerCAmelCase__))
snake_case_ = {
'do_resize': True,
'size': 20,
'do_center_crop': True,
'crop_size': 18,
'do_normalize': True,
'image_mean': [0.48145466, 0.4578275, 0.40821073],
'image_std': [0.26862954, 0.26130258, 0.27577711],
}
        snake_case_ = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
with open(self.image_processor_file, 'w', encoding='utf-8') as fp:
json.dump(lowerCAmelCase__, lowerCAmelCase__)
def a_ ( self, **lowerCAmelCase__) -> Tuple:
return CLIPTokenizer.from_pretrained(self.tmpdirname, **lowerCAmelCase__)
def a_ ( self, **lowerCAmelCase__) -> int:
return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **lowerCAmelCase__)
def a_ ( self, **lowerCAmelCase__) -> Dict:
return CLIPImageProcessor.from_pretrained(self.tmpdirname, **lowerCAmelCase__)
def a_ ( self) -> Union[str, Any]:
shutil.rmtree(self.tmpdirname)
def a_ ( self) -> Tuple:
        snake_case_ = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
snake_case_ = [Image.fromarray(np.moveaxis(lowerCAmelCase__, 0, -1)) for x in image_inputs]
return image_inputs
def a_ ( self) -> List[str]:
snake_case_ = self.get_tokenizer()
snake_case_ = self.get_rust_tokenizer()
snake_case_ = self.get_image_processor()
snake_case_ = CLIPProcessor(tokenizer=lowerCAmelCase__, image_processor=lowerCAmelCase__)
processor_slow.save_pretrained(self.tmpdirname)
        snake_case_ = CLIPProcessor.from_pretrained(self.tmpdirname, use_fast=False)
snake_case_ = CLIPProcessor(tokenizer=lowerCAmelCase__, image_processor=lowerCAmelCase__)
processor_fast.save_pretrained(self.tmpdirname)
snake_case_ = CLIPProcessor.from_pretrained(self.tmpdirname)
self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
self.assertIsInstance(processor_slow.tokenizer, lowerCAmelCase__)
self.assertIsInstance(processor_fast.tokenizer, lowerCAmelCase__)
self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
self.assertIsInstance(processor_slow.image_processor, lowerCAmelCase__)
self.assertIsInstance(processor_fast.image_processor, lowerCAmelCase__)
def a_ ( self) -> Any:
snake_case_ = CLIPProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
processor.save_pretrained(self.tmpdirname)
snake_case_ = self.get_tokenizer(bos_token='(BOS)', eos_token='(EOS)')
        snake_case_ = self.get_image_processor(do_normalize=False, padding_value=1.0)
        snake_case_ = CLIPProcessor.from_pretrained(
            self.tmpdirname, bos_token='(BOS)', eos_token='(EOS)', do_normalize=False, padding_value=1.0)
self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
self.assertIsInstance(processor.tokenizer, lowerCAmelCase__)
self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
self.assertIsInstance(processor.image_processor, lowerCAmelCase__)
def a_ ( self) -> Optional[int]:
snake_case_ = self.get_image_processor()
snake_case_ = self.get_tokenizer()
snake_case_ = CLIPProcessor(tokenizer=lowerCAmelCase__, image_processor=lowerCAmelCase__)
snake_case_ = self.prepare_image_inputs()
snake_case_ = image_processor(lowerCAmelCase__, return_tensors='np')
snake_case_ = processor(images=lowerCAmelCase__, return_tensors='np')
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)
def a_ ( self) -> Tuple:
snake_case_ = self.get_image_processor()
snake_case_ = self.get_tokenizer()
snake_case_ = CLIPProcessor(tokenizer=lowerCAmelCase__, image_processor=lowerCAmelCase__)
snake_case_ = 'lower newer'
snake_case_ = processor(text=lowerCAmelCase__)
snake_case_ = tokenizer(lowerCAmelCase__)
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key], encoded_processor[key])
def a_ ( self) -> List[str]:
snake_case_ = self.get_image_processor()
snake_case_ = self.get_tokenizer()
snake_case_ = CLIPProcessor(tokenizer=lowerCAmelCase__, image_processor=lowerCAmelCase__)
snake_case_ = 'lower newer'
snake_case_ = self.prepare_image_inputs()
snake_case_ = processor(text=lowerCAmelCase__, images=lowerCAmelCase__)
self.assertListEqual(list(inputs.keys()), ['input_ids', 'attention_mask', 'pixel_values'])
# test if it raises when no input is passed
        with pytest.raises(ValueError):
processor()
def a_ ( self) -> Dict:
snake_case_ = self.get_image_processor()
snake_case_ = self.get_tokenizer()
snake_case_ = CLIPProcessor(tokenizer=lowerCAmelCase__, image_processor=lowerCAmelCase__)
snake_case_ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
snake_case_ = processor.batch_decode(lowerCAmelCase__)
snake_case_ = tokenizer.batch_decode(lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
def a_ ( self) -> Optional[int]:
snake_case_ = self.get_image_processor()
snake_case_ = self.get_tokenizer()
snake_case_ = CLIPProcessor(tokenizer=lowerCAmelCase__, image_processor=lowerCAmelCase__)
snake_case_ = 'lower newer'
snake_case_ = self.prepare_image_inputs()
snake_case_ = processor(text=lowerCAmelCase__, images=lowerCAmelCase__)
self.assertListEqual(list(inputs.keys()), processor.model_input_names)
"""simple docstring"""
import functools
def mincost_tickets(days , costs ) -> int:
    # Validation
    if not isinstance(days , list ) or not all(isinstance(day , int ) for day in days ):
        raise ValueError('The parameter days should be a list of integers' )
    if len(costs ) != 3 or not all(isinstance(cost , int ) for cost in costs ):
        raise ValueError('The parameter costs should be a list of three integers' )
    if len(days ) == 0:
        return 0
    if min(days ) <= 0:
        raise ValueError('All days elements should be greater than 0' )
    if max(days ) >= 366:
        raise ValueError('All days elements should be less than 366' )
    days_set = set(days )
@functools.cache
def dynamic_programming(UpperCAmelCase ) -> int:
if index > 365:
return 0
if index not in days_set:
return dynamic_programming(index + 1 )
return min(
costs[0] + dynamic_programming(index + 1 ) , costs[1] + dynamic_programming(index + 7 ) , costs[2] + dynamic_programming(index + 30 ) , )
return dynamic_programming(1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
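    # Worked example (LeetCode 983's sample): day passes on days 1 and 20 plus
    # a 7-day pass covering days 4-8 give a minimum cost of 2 + 7 + 2 = 11.
    assert mincost_tickets([1, 4, 6, 7, 8, 20], [2, 7, 15]) == 11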
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
__UpperCamelCase = logging.get_logger(__name__)
__UpperCamelCase = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''encoder.layer_norm_for_extract''': '''layer_norm_for_extract''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''lm_head''',
'''label_embs_concat''': '''label_embeddings_concat''',
'''mask_emb''': '''masked_spec_embed''',
'''spk_proj''': '''speaker_proj''',
}
__UpperCamelCase = [
'''lm_head''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
'''label_embeddings_concat''',
'''speaker_proj''',
'''layer_norm_for_extract''',
]
def set_recursively(hf_pointer , key , value , full_name , weight_type ) -> None:
    for attribute in key.split('.' ):
        hf_pointer = getattr(hf_pointer , attribute )
    if weight_type is not None:
        hf_shape = getattr(hf_pointer , weight_type ).shape
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
            f' {value.shape} for {full_name}' )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' )
def recursively_load_weights(fairseq_model , hf_model ) -> None:
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.unispeech_sat.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name , value , feature_extractor , unused_weights , hf_model.config.feat_extract_norm == 'group' , )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = 'unispeech_sat.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
                    if "layer_norm_for_extract" in name and (".".join(name.split('.' )[:-1] ) != key):
                        # special case since naming is very similar
                        continue
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key )[0].split('.' )[-2]
                        mapped_key = mapped_key.replace('*' , layer_index )
                    if "weight_g" in name:
                        weight_type = 'weight_g'
                    elif "weight_v" in name:
                        weight_type = 'weight_v'
                    elif "bias" in name:
                        weight_type = 'bias'
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = 'weight'
                    else:
                        weight_type = None
                    set_recursively(hf_model , mapped_key , value , name , weight_type )
                continue
        if not is_used:
            unused_weights.append(name )
    logger.warning(f'Unused weights: {unused_weights}' )
def load_conv_layer(full_name , value , feature_extractor , unused_weights , use_group_norm ) -> None:
    name = full_name.split('conv_layers.' )[-1]
    items = name.split('.' )
    layer_id = int(items[0] )
    type_id = int(items[1] )
    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f'{full_name} has size {value.shape}, but'
                    f' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.' )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f'{full_name} has size {value.shape}, but'
                    f' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.' )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f'{full_name} has size {value.shape}, but'
                    f' {feature_extractor[layer_id].layer_norm.bias.data.shape} was found.' )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f'{full_name} has size {value.shape}, but'
                    f' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.' )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
    else:
        unused_weights.append(full_name )
@torch.no_grad()
def convert_unispeech_sat_checkpoint(checkpoint_path , pytorch_dump_folder_path , config_path=None , dict_path=None , is_finetuned=True ) -> None:
    if config_path is not None:
        config = UniSpeechSatConfig.from_pretrained(config_path )
    else:
        config = UniSpeechSatConfig()
    dict_path = ''
    if is_finetuned:
        hf_wavavec = UniSpeechSatForCTC(config )
    else:
        hf_wavavec = UniSpeechSatForPreTraining(config )
    model , _ , _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} )
    model = model[0].eval()
    recursively_load_weights(model , hf_wavavec )
    hf_wavavec.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
__UpperCamelCase = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
__UpperCamelCase = parser.parse_args()
convert_unispeech_sat_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
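# Example invocation (file paths are placeholders; the script name is an
# assumption based on the converter's usual naming):
#
#   python convert_unispeech_sat_original_pytorch_checkpoint_to_pytorch.py \
#       --checkpoint_path /path/to/unispeech_sat.pt \
#       --pytorch_dump_folder_path ./unispeech-sat-hf \
#       --not_finetuned
#
# Omit --not_finetuned when converting a CTC fine-tuned checkpoint.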
"""simple docstring"""
import copy
import re
class UpperCamelCase :
SCREAMING_SNAKE_CASE_ = "hp"
SCREAMING_SNAKE_CASE_ = {}
SCREAMING_SNAKE_CASE_ = None
@classmethod
def a_ ( cls, lowerCAmelCase__, lowerCAmelCase__) -> Tuple:
snake_case_ = prefix
snake_case_ = defaults
cls.build_naming_info()
@staticmethod
def a_ ( lowerCAmelCase__, lowerCAmelCase__) -> Optional[Any]:
if len(lowerCAmelCase__) == 0:
return ""
snake_case_ = None
if any(char.isdigit() for char in word):
raise Exception(f'Parameters should not contain numbers: \'{word}\' contains a number')
if word in info["short_word"]:
return info["short_word"][word]
for prefix_len in range(1, len(lowerCAmelCase__) + 1):
snake_case_ = word[:prefix_len]
if prefix in info["reverse_short_word"]:
continue
else:
snake_case_ = prefix
break
if short_word is None:
# Paranoid fallback
def int_to_alphabetic(lowerCAmelCase__):
snake_case_ = ''
while integer != 0:
snake_case_ = chr(ord('A') + integer % 10) + s
integer //= 10
return s
snake_case_ = 0
while True:
snake_case_ = word + '#' + int_to_alphabetic(lowerCAmelCase__)
if sword in info["reverse_short_word"]:
continue
else:
snake_case_ = sword
break
snake_case_ = short_word
snake_case_ = word
return short_word
@staticmethod
def a_ ( lowerCAmelCase__, lowerCAmelCase__) -> Dict:
snake_case_ = param_name.split('_')
snake_case_ = [TrialShortNamer.shortname_for_word(lowerCAmelCase__, lowerCAmelCase__) for word in words]
# We try to create a separatorless short name, but if there is a collision we have to fallback
# to a separated short name
snake_case_ = ['', '_']
for separator in separators:
snake_case_ = separator.join(lowerCAmelCase__)
if shortname not in info["reverse_short_param"]:
snake_case_ = shortname
snake_case_ = param_name
return shortname
return param_name
@staticmethod
def a_ ( lowerCAmelCase__, lowerCAmelCase__) -> List[Any]:
snake_case_ = TrialShortNamer.shortname_for_key(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = short_name
snake_case_ = param_name
@classmethod
def a_ ( cls) -> List[str]:
if cls.NAMING_INFO is not None:
return
snake_case_ = {
'short_word': {},
'reverse_short_word': {},
'short_param': {},
'reverse_short_param': {},
}
snake_case_ = list(cls.DEFAULTS.keys())
for k in field_keys:
cls.add_new_param_name(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = info
@classmethod
def a_ ( cls, lowerCAmelCase__) -> List[Any]:
cls.build_naming_info()
assert cls.PREFIX is not None
snake_case_ = [copy.copy(cls.PREFIX)]
for k, v in params.items():
if k not in cls.DEFAULTS:
raise Exception(f'You should provide a default value for the param name {k} with value {v}')
if v == cls.DEFAULTS[k]:
# The default value is not added to the name
continue
snake_case_ = cls.NAMING_INFO['short_param'][k]
if isinstance(lowerCAmelCase__, lowerCAmelCase__):
snake_case_ = 1 if v else 0
snake_case_ = '' if isinstance(lowerCAmelCase__, (int, float)) else '-'
snake_case_ = f'{key}{sep}{v}'
name.append(lowerCAmelCase__)
return "_".join(lowerCAmelCase__)
@classmethod
def a_ ( cls, lowerCAmelCase__) -> Optional[Any]:
snake_case_ = repr[len(cls.PREFIX) + 1 :]
if repr == "":
snake_case_ = []
else:
snake_case_ = repr.split('_')
snake_case_ = {}
for value in values:
if "-" in value:
snake_case_ , snake_case_ = value.split('-')
else:
snake_case_ = re.sub('[0-9.]', '', lowerCAmelCase__)
snake_case_ = float(re.sub('[^0-9.]', '', lowerCAmelCase__))
snake_case_ = cls.NAMING_INFO['reverse_short_param'][p_k]
snake_case_ = p_v
for k in cls.DEFAULTS:
if k not in parameters:
snake_case_ = cls.DEFAULTS[k]
return parameters
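# Standalone illustration of the prefix-shortening idea implemented above:
# each parameter word maps to its shortest prefix not already taken, so e.g.
# "learning" -> "l" and then "layers" -> "la". Sketch with invented names:
#
#   taken = set()
#   def short_prefix(word):
#       for n in range(1, len(word) + 1):
#           if word[:n] not in taken:
#               taken.add(word[:n])
#               return word[:n]
#       return word
#
#   short_prefix("learning")  # -> "l"
#   short_prefix("layers")    # -> "la"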
"""simple docstring"""
from typing import Tuple, Union
from ...modeling_outputs import BackboneOutput
from ...modeling_utils import PreTrainedModel
from ...utils import is_timm_available, is_torch_available, requires_backends
from ...utils.backbone_utils import BackboneMixin
from .configuration_timm_backbone import TimmBackboneConfig
if is_timm_available():
import timm
if is_torch_available():
from torch import Tensor
class UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE_ = "pixel_values"
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = TimmBackboneConfig
def __init__( self, lowerCAmelCase__, **lowerCAmelCase__) -> Tuple:
requires_backends(self, 'timm')
super().__init__(lowerCAmelCase__)
snake_case_ = config
if config.backbone is None:
raise ValueError('backbone is not set in the config. Please set it to a timm model name.')
if config.backbone not in timm.list_models():
raise ValueError(f'backbone {config.backbone} is not supported by timm.')
if hasattr(lowerCAmelCase__, 'out_features') and config.out_features is not None:
raise ValueError('out_features is not supported by TimmBackbone. Please use out_indices instead.')
snake_case_ = getattr(lowerCAmelCase__, 'use_pretrained_backbone', lowerCAmelCase__)
if pretrained is None:
raise ValueError('use_pretrained_backbone is not set in the config. Please set it to True or False.')
# We just take the final layer by default. This matches the default for the transformers models.
snake_case_ = config.out_indices if getattr(lowerCAmelCase__, 'out_indices', lowerCAmelCase__) is not None else (-1,)
snake_case_ = timm.create_model(
config.backbone, pretrained=lowerCAmelCase__, features_only=config.features_only, in_chans=config.num_channels, out_indices=lowerCAmelCase__, **lowerCAmelCase__, )
# These are used to control the output of the model when called. If output_hidden_states is True, then
# return_layers is modified to include all layers.
snake_case_ = self._backbone.return_layers
        snake_case_ = {layer['module']: str(i) for i, layer in enumerate(self._backbone.feature_info.info)}
super()._init_backbone(lowerCAmelCase__)
@classmethod
def a_ ( cls, lowerCAmelCase__, *lowerCAmelCase__, **lowerCAmelCase__) -> Optional[Any]:
requires_backends(cls, ['vision', 'timm'])
from ...models.timm_backbone import TimmBackboneConfig
snake_case_ = kwargs.pop('config', TimmBackboneConfig())
snake_case_ = kwargs.pop('use_timm_backbone', lowerCAmelCase__)
if not use_timm:
raise ValueError('use_timm_backbone must be True for timm backbones')
snake_case_ = kwargs.pop('num_channels', config.num_channels)
snake_case_ = kwargs.pop('features_only', config.features_only)
snake_case_ = kwargs.pop('use_pretrained_backbone', config.use_pretrained_backbone)
snake_case_ = kwargs.pop('out_indices', config.out_indices)
snake_case_ = TimmBackboneConfig(
backbone=lowerCAmelCase__, num_channels=lowerCAmelCase__, features_only=lowerCAmelCase__, use_pretrained_backbone=lowerCAmelCase__, out_indices=lowerCAmelCase__, )
return super()._from_config(lowerCAmelCase__, **lowerCAmelCase__)
def a_ ( self, lowerCAmelCase__) -> Tuple:
pass
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__=None, lowerCAmelCase__=None, lowerCAmelCase__=None, **lowerCAmelCase__) -> Union[BackboneOutput, Tuple[Tensor, ...]]:
snake_case_ = return_dict if return_dict is not None else self.config.use_return_dict
snake_case_ = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
snake_case_ = output_attentions if output_attentions is not None else self.config.output_attentions
if output_attentions:
raise ValueError('Cannot output attentions for timm backbones at the moment')
if output_hidden_states:
# We modify the return layers to include all the stages of the backbone
snake_case_ = self._all_layers
snake_case_ = self._backbone(lowerCAmelCase__, **lowerCAmelCase__)
snake_case_ = self._return_layers
snake_case_ = tuple(hidden_states[i] for i in self.out_indices)
else:
snake_case_ = self._backbone(lowerCAmelCase__, **lowerCAmelCase__)
snake_case_ = None
snake_case_ = tuple(lowerCAmelCase__)
snake_case_ = tuple(lowerCAmelCase__) if hidden_states is not None else None
if not return_dict:
snake_case_ = (feature_maps,)
if output_hidden_states:
snake_case_ = output + (hidden_states,)
return output
return BackboneOutput(feature_maps=lowerCAmelCase__, hidden_states=lowerCAmelCase__, attentions=lowerCAmelCase__)
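# A minimal usage sketch for the backbone wrapper above (hedged: 'resnet18' and the
# keyword values are illustrative placeholders, not taken from this file):
#
#   config = TimmBackboneConfig(backbone='resnet18', use_pretrained_backbone=False, out_indices=(1, 2, 3, 4))
#   backbone = UpperCamelCase(config)
#   features = backbone(pixel_values).feature_maps  # one tensor per requested stage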
"""simple docstring"""
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
@pytest.mark.parametrize(
'files' , [
['full:README.md', 'dataset_infos.json'],
['empty:README.md', 'dataset_infos.json'],
['dataset_infos.json'],
['full:README.md'],
] , )
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> List[str]:
snake_case_ = tmp_path_factory.mktemp('dset_infos_dir' )
if "full:README.md" in files:
with open(dataset_infos_dir / 'README.md' , 'w' ) as f:
f.write('---\ndataset_info:\n dataset_size: 42\n---' )
if "empty:README.md" in files:
with open(dataset_infos_dir / 'README.md' , 'w' ) as f:
f.write('' )
# we want to support dataset_infos.json for backward compatibility
if "dataset_infos.json" in files:
with open(dataset_infos_dir / 'dataset_infos.json' , 'w' ) as f:
f.write('{"default": {"dataset_size": 42}}' )
snake_case_ = DatasetInfosDict.from_directory(UpperCAmelCase )
assert dataset_infos
assert dataset_infos["default"].dataset_size == 42
@pytest.mark.parametrize(
'dataset_info' , [
DatasetInfo(),
DatasetInfo(
description='foo' , features=Features({'a': Value('int32' )} ) , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train'}] , download_size=42 , ),
] , )
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> Optional[Any]:
snake_case_ = str(UpperCAmelCase )
dataset_info.write_to_directory(UpperCAmelCase )
snake_case_ = DatasetInfo.from_directory(UpperCAmelCase )
assert dataset_info == reloaded
assert os.path.exists(os.path.join(UpperCAmelCase , 'dataset_info.json' ) )
def UpperCAmelCase ( ) -> Union[str, Any]:
snake_case_ = DatasetInfo(
description='foo' , citation='bar' , homepage='https://foo.bar' , license='CC0' , features=Features({'a': Value('int32' )} ) , post_processed={} , supervised_keys=() , task_templates=[] , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train', 'num_examples': 42}] , download_checksums={} , download_size=1337 , post_processing_size=442 , dataset_size=1234 , size_in_bytes=1337 + 442 + 1234 , )
snake_case_ = dataset_info._to_yaml_dict()
assert sorted(UpperCAmelCase ) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML )
for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
assert key in dataset_info_yaml_dict
assert isinstance(dataset_info_yaml_dict[key] , (list, dict, int, str) )
snake_case_ = yaml.safe_dump(UpperCAmelCase )
snake_case_ = yaml.safe_load(UpperCAmelCase )
assert dataset_info_yaml_dict == reloaded
def UpperCAmelCase ( ) -> Optional[Any]:
snake_case_ = DatasetInfo()
snake_case_ = dataset_info._to_yaml_dict()
assert dataset_info_yaml_dict == {}
@pytest.mark.parametrize(
'dataset_infos_dict' , [
DatasetInfosDict(),
DatasetInfosDict({'default': DatasetInfo()} ),
DatasetInfosDict({'my_config_name': DatasetInfo()} ),
DatasetInfosDict(
{
'default': DatasetInfo(
description='foo' , features=Features({'a': Value('int32' )} ) , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train'}] , download_size=42 , )
} ),
DatasetInfosDict(
{
'v1': DatasetInfo(dataset_size=42 ),
'v2': DatasetInfo(dataset_size=1337 ),
} ),
] , )
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> List[str]:
snake_case_ = str(UpperCAmelCase )
dataset_infos_dict.write_to_directory(UpperCAmelCase )
snake_case_ = DatasetInfosDict.from_directory(UpperCAmelCase )
    # the config_name of the dataset_infos_dict takes precedence over the config_name attribute of each DatasetInfo
for config_name, dataset_info in dataset_infos_dict.items():
snake_case_ = config_name
# the yaml representation doesn't include fields like description or citation
# so we just test that we can recover what we can from the yaml
snake_case_ = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict() )
assert dataset_infos_dict == reloaded
if dataset_infos_dict:
assert os.path.exists(os.path.join(UpperCAmelCase , 'README.md' ) )
"""simple docstring"""
import os
def UpperCAmelCase ( ) -> List[str]:
with open(os.path.dirname(UpperCAmelCase ) + '/p022_names.txt' ) as file:
snake_case_ = str(file.readlines()[0] )
snake_case_ = names.replace('"' , '' ).split(',' )
names.sort()
snake_case_ = 0
snake_case_ = 0
for i, name in enumerate(UpperCAmelCase ):
for letter in name:
name_score += ord(UpperCAmelCase ) - 64
total_score += (i + 1) * name_score
snake_case_ = 0
return total_score
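# Worked example of the scoring rule above: 'COLIN' scores 3 + 15 + 12 + 9 + 14 = 53;
# it lands at zero-based index 937 in the sorted list, contributing 938 * 53 = 49714.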
if __name__ == "__main__":
print(solution())
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ):
SCREAMING_SNAKE_CASE_ = StableDiffusionInpaintPipeline
SCREAMING_SNAKE_CASE_ = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
SCREAMING_SNAKE_CASE_ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
SCREAMING_SNAKE_CASE_ = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
SCREAMING_SNAKE_CASE_ = frozenset([] )
def a_ ( self) -> Any:
torch.manual_seed(0)
snake_case_ = UNetaDConditionModel(
block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=9, out_channels=4, down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D'), up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D'), cross_attention_dim=32, attention_head_dim=(2, 4), use_linear_projection=lowerCAmelCase__, )
snake_case_ = PNDMScheduler(skip_prk_steps=lowerCAmelCase__)
torch.manual_seed(0)
snake_case_ = AutoencoderKL(
block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'], up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'], latent_channels=4, sample_size=128, )
torch.manual_seed(0)
snake_case_ = CLIPTextConfig(
bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act='gelu', projection_dim=512, )
snake_case_ = CLIPTextModel(lowerCAmelCase__)
snake_case_ = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
snake_case_ = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__=0) -> List[str]:
# TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched
snake_case_ = floats_tensor((1, 3, 32, 32), rng=random.Random(lowerCAmelCase__)).to(lowerCAmelCase__)
snake_case_ = image.cpu().permute(0, 2, 3, 1)[0]
snake_case_ = Image.fromarray(np.uinta(lowerCAmelCase__)).convert('RGB').resize((64, 64))
snake_case_ = Image.fromarray(np.uinta(image + 4)).convert('RGB').resize((64, 64))
if str(lowerCAmelCase__).startswith('mps'):
snake_case_ = torch.manual_seed(lowerCAmelCase__)
else:
snake_case_ = torch.Generator(device=lowerCAmelCase__).manual_seed(lowerCAmelCase__)
snake_case_ = {
'prompt': 'A painting of a squirrel eating a burger',
'image': init_image,
'mask_image': mask_image,
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def a_ ( self) -> Dict:
snake_case_ = 'cpu' # ensure determinism for the device-dependent torch.Generator
snake_case_ = self.get_dummy_components()
snake_case_ = StableDiffusionInpaintPipeline(**lowerCAmelCase__)
snake_case_ = sd_pipe.to(lowerCAmelCase__)
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase__)
snake_case_ = self.get_dummy_inputs(lowerCAmelCase__)
snake_case_ = sd_pipe(**lowerCAmelCase__).images
snake_case_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
snake_case_ = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
def a_ ( self) -> Union[str, Any]:
super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class UpperCamelCase ( unittest.TestCase ):
def a_ ( self) -> Optional[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a_ ( self) -> Union[str, Any]:
snake_case_ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-inpaint/init_image.png')
snake_case_ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png')
snake_case_ = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'
'/yellow_cat_sitting_on_a_park_bench.npy')
snake_case_ = 'stabilityai/stable-diffusion-2-inpainting'
snake_case_ = StableDiffusionInpaintPipeline.from_pretrained(lowerCAmelCase__, safety_checker=lowerCAmelCase__)
pipe.to(lowerCAmelCase__)
pipe.set_progress_bar_config(disable=lowerCAmelCase__)
pipe.enable_attention_slicing()
snake_case_ = 'Face of a yellow cat, high resolution, sitting on a park bench'
snake_case_ = torch.manual_seed(0)
snake_case_ = pipe(
prompt=lowerCAmelCase__, image=lowerCAmelCase__, mask_image=lowerCAmelCase__, generator=lowerCAmelCase__, output_type='np', )
snake_case_ = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image).max() < 9e-3
def a_ ( self) -> Optional[int]:
snake_case_ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-inpaint/init_image.png')
snake_case_ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png')
snake_case_ = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'
'/yellow_cat_sitting_on_a_park_bench_fp16.npy')
snake_case_ = 'stabilityai/stable-diffusion-2-inpainting'
snake_case_ = StableDiffusionInpaintPipeline.from_pretrained(
lowerCAmelCase__, torch_dtype=torch.floataa, safety_checker=lowerCAmelCase__, )
pipe.to(lowerCAmelCase__)
pipe.set_progress_bar_config(disable=lowerCAmelCase__)
pipe.enable_attention_slicing()
snake_case_ = 'Face of a yellow cat, high resolution, sitting on a park bench'
snake_case_ = torch.manual_seed(0)
snake_case_ = pipe(
prompt=lowerCAmelCase__, image=lowerCAmelCase__, mask_image=lowerCAmelCase__, generator=lowerCAmelCase__, output_type='np', )
snake_case_ = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image).max() < 5e-1
def a_ ( self) -> Union[str, Any]:
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
snake_case_ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-inpaint/init_image.png')
snake_case_ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png')
snake_case_ = 'stabilityai/stable-diffusion-2-inpainting'
snake_case_ = PNDMScheduler.from_pretrained(lowerCAmelCase__, subfolder='scheduler')
snake_case_ = StableDiffusionInpaintPipeline.from_pretrained(
lowerCAmelCase__, safety_checker=lowerCAmelCase__, scheduler=lowerCAmelCase__, torch_dtype=torch.floataa, )
pipe.to(lowerCAmelCase__)
pipe.set_progress_bar_config(disable=lowerCAmelCase__)
pipe.enable_attention_slicing(1)
pipe.enable_sequential_cpu_offload()
snake_case_ = 'Face of a yellow cat, high resolution, sitting on a park bench'
snake_case_ = torch.manual_seed(0)
snake_case_ = pipe(
prompt=lowerCAmelCase__, image=lowerCAmelCase__, mask_image=lowerCAmelCase__, generator=lowerCAmelCase__, num_inference_steps=2, output_type='np', )
snake_case_ = torch.cuda.max_memory_allocated()
# make sure that less than 2.65 GB is allocated
assert mem_bytes < 2.65 * 10**9
"""simple docstring"""
from __future__ import annotations
import json
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
__UpperCamelCase = {'''UserAgent''': UserAgent().random}
def UpperCAmelCase ( UpperCAmelCase ) -> dict:
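    # The profile page embeds its data as a JSON blob inside a <script> tag; slice
    # from the first '{"config"' up to (but excluding) the trailing ';' and parse it.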
snake_case_ = script.contents[0]
snake_case_ = json.loads(data[data.find('{"config"' ) : -1] )
return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]
class UpperCamelCase :
def __init__( self, lowerCAmelCase__) -> Any:
snake_case_ = f'https://www.instagram.com/{username}/'
snake_case_ = self.get_json()
def a_ ( self) -> dict:
snake_case_ = requests.get(self.url, headers=lowerCAmelCase__).text
snake_case_ = BeautifulSoup(lowerCAmelCase__, 'html.parser').find_all('script')
try:
return extract_user_profile(scripts[4])
except (json.decoder.JSONDecodeError, KeyError):
return extract_user_profile(scripts[3])
def __repr__( self) -> str:
return f'{self.__class__.__name__}(\'{self.username}\')'
def __str__( self) -> str:
return f'{self.fullname} ({self.username}) is {self.biography}'
@property
def a_ ( self) -> str:
return self.user_data["username"]
@property
def a_ ( self) -> str:
return self.user_data["full_name"]
@property
def a_ ( self) -> str:
return self.user_data["biography"]
@property
def a_ ( self) -> str:
return self.user_data["business_email"]
@property
def a_ ( self) -> str:
return self.user_data["external_url"]
@property
def a_ ( self) -> int:
return self.user_data["edge_followed_by"]["count"]
@property
def a_ ( self) -> int:
return self.user_data["edge_follow"]["count"]
@property
def a_ ( self) -> int:
return self.user_data["edge_owner_to_timeline_media"]["count"]
@property
def a_ ( self) -> str:
return self.user_data["profile_pic_url_hd"]
@property
def a_ ( self) -> bool:
return self.user_data["is_verified"]
@property
def a_ ( self) -> bool:
return self.user_data["is_private"]
def UpperCAmelCase ( UpperCAmelCase = "github" ) -> None:
import os
if os.environ.get('CI' ):
return # test failing on GitHub Actions
snake_case_ = InstagramUser(UpperCAmelCase )
assert instagram_user.user_data
assert isinstance(instagram_user.user_data , UpperCAmelCase )
assert instagram_user.username == username
if username != "github":
return
assert instagram_user.fullname == "GitHub"
assert instagram_user.biography == "Built for developers."
assert instagram_user.number_of_posts > 150
assert instagram_user.number_of_followers > 120000
assert instagram_user.number_of_followings > 15
assert instagram_user.email == "support@github.com"
assert instagram_user.website == "https://github.com/readme"
assert instagram_user.profile_picture_url.startswith('https://instagram.' )
assert instagram_user.is_verified is True
assert instagram_user.is_private is False
if __name__ == "__main__":
import doctest
doctest.testmod()
__UpperCamelCase = InstagramUser('''github''')
print(instagram_user)
print(F"""{instagram_user.number_of_posts = }""")
print(F"""{instagram_user.number_of_followers = }""")
print(F"""{instagram_user.number_of_followings = }""")
print(F"""{instagram_user.email = }""")
print(F"""{instagram_user.website = }""")
print(F"""{instagram_user.profile_picture_url = }""")
print(F"""{instagram_user.is_verified = }""")
print(F"""{instagram_user.is_private = }""")
"""simple docstring"""
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
__UpperCamelCase = logging.getLogger(__name__)
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> Any:
return (preds == labels).mean()
@dataclass
class UpperCamelCase :
SCREAMING_SNAKE_CASE_ = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
SCREAMING_SNAKE_CASE_ = field(
default=lowerCAmelCase__ , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
SCREAMING_SNAKE_CASE_ = field(
default=lowerCAmelCase__ , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
SCREAMING_SNAKE_CASE_ = field(
default=lowerCAmelCase__ , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
@dataclass
class UpperCamelCase :
SCREAMING_SNAKE_CASE_ = field(metadata={"help": "The name of the task to train on: " + ", ".join(processors.keys() )} )
SCREAMING_SNAKE_CASE_ = field(metadata={"help": "Should contain the data files for the task."} )
SCREAMING_SNAKE_CASE_ = field(
default=1_2_8 , metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
SCREAMING_SNAKE_CASE_ = field(
default=lowerCAmelCase__ , metadata={"help": "Overwrite the cached training and evaluation sets"} )
def UpperCAmelCase ( ) -> Optional[int]:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
snake_case_ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
snake_case_ , snake_case_ , snake_case_ = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f'Output directory ({training_args.output_dir}) already exists and is not empty. Use'
' --overwrite_output_dir to overcome.' )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('Training/evaluation parameters %s' , UpperCAmelCase )
# Set seed
set_seed(training_args.seed )
try:
snake_case_ = processors[data_args.task_name]()
snake_case_ = processor.get_labels()
snake_case_ = len(UpperCAmelCase )
except KeyError:
raise ValueError('Task not found: %s' % (data_args.task_name) )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
snake_case_ = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=UpperCAmelCase , finetuning_task=data_args.task_name , cache_dir=model_args.cache_dir , )
snake_case_ = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
snake_case_ = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=UpperCAmelCase , cache_dir=model_args.cache_dir , )
# Get datasets
snake_case_ = (
MultipleChoiceDataset(
data_dir=data_args.data_dir , tokenizer=UpperCAmelCase , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
if training_args.do_train
else None
)
snake_case_ = (
MultipleChoiceDataset(
data_dir=data_args.data_dir , tokenizer=UpperCAmelCase , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
if training_args.do_eval
else None
)
def compute_metrics(UpperCAmelCase ) -> Dict:
snake_case_ = np.argmax(p.predictions , axis=1 )
return {"acc": simple_accuracy(UpperCAmelCase , p.label_ids )}
# Data collator
snake_case_ = DataCollatorWithPadding(UpperCAmelCase , pad_to_multiple_of=8 ) if training_args.fpaa else None
# Initialize our Trainer
snake_case_ = Trainer(
model=UpperCAmelCase , args=UpperCAmelCase , train_dataset=UpperCAmelCase , eval_dataset=UpperCAmelCase , compute_metrics=UpperCAmelCase , data_collator=UpperCAmelCase , )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
snake_case_ = {}
if training_args.do_eval:
logger.info('*** Evaluate ***' )
snake_case_ = trainer.evaluate()
snake_case_ = os.path.join(training_args.output_dir , 'eval_results.txt' )
if trainer.is_world_master():
with open(UpperCAmelCase , 'w' ) as writer:
logger.info('***** Eval results *****' )
for key, value in result.items():
logger.info(' %s = %s' , UpperCAmelCase , UpperCAmelCase )
writer.write('%s = %s\n' % (key, value) )
results.update(UpperCAmelCase )
return results
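# A hedged invocation sketch built from the dataclasses above (task name and paths
# are placeholders; the flags map to fields defined in this script):
#
#   python run_multiple_choice.py --task_name swag --data_dir ./swag \
#       --model_name_or_path bert-base-uncased --output_dir ./out --do_train --do_eval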
def UpperCAmelCase ( UpperCAmelCase ) -> Optional[int]:
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
"""simple docstring"""
__UpperCamelCase = '''0.18.2'''
from .configuration_utils import ConfigMixin
from .utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_inflect_available,
is_invisible_watermark_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_librosa_available,
is_note_seq_available,
is_onnx_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
is_transformers_available,
is_transformers_version,
is_unidecode_available,
logging,
)
try:
if not is_onnx_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_onnx_objects import * # noqa F403
else:
from .pipelines import OnnxRuntimeModel
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_pt_objects import * # noqa F403
else:
from .models import (
AutoencoderKL,
ControlNetModel,
ModelMixin,
PriorTransformer,
TaFilmDecoder,
TransformeraDModel,
UNetaDModel,
UNetaDConditionModel,
UNetaDModel,
UNetaDConditionModel,
VQModel,
)
from .optimization import (
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
get_scheduler,
)
from .pipelines import (
AudioPipelineOutput,
ConsistencyModelPipeline,
DanceDiffusionPipeline,
DDIMPipeline,
DDPMPipeline,
DiffusionPipeline,
DiTPipeline,
ImagePipelineOutput,
KarrasVePipeline,
LDMPipeline,
LDMSuperResolutionPipeline,
PNDMPipeline,
RePaintPipeline,
ScoreSdeVePipeline,
)
from .schedulers import (
CMStochasticIterativeScheduler,
DDIMInverseScheduler,
DDIMParallelScheduler,
DDIMScheduler,
DDPMParallelScheduler,
DDPMScheduler,
DEISMultistepScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
HeunDiscreteScheduler,
IPNDMScheduler,
KarrasVeScheduler,
KDPMaAncestralDiscreteScheduler,
KDPMaDiscreteScheduler,
PNDMScheduler,
RePaintScheduler,
SchedulerMixin,
ScoreSdeVeScheduler,
UnCLIPScheduler,
UniPCMultistepScheduler,
VQDiffusionScheduler,
)
from .training_utils import EMAModel
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .schedulers import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .schedulers import DPMSolverSDEScheduler
try:
if not (is_torch_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
AltDiffusionImgaImgPipeline,
AltDiffusionPipeline,
AudioLDMPipeline,
CycleDiffusionPipeline,
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
ImageTextPipelineOutput,
KandinskyImgaImgPipeline,
KandinskyInpaintPipeline,
KandinskyPipeline,
KandinskyPriorPipeline,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaControlnetPipeline,
KandinskyVaaImgaImgPipeline,
KandinskyVaaInpaintPipeline,
KandinskyVaaPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
KandinskyVaaPriorPipeline,
LDMTextToImagePipeline,
PaintByExamplePipeline,
SemanticStableDiffusionPipeline,
ShapEImgaImgPipeline,
ShapEPipeline,
StableDiffusionAttendAndExcitePipeline,
StableDiffusionControlNetImgaImgPipeline,
StableDiffusionControlNetInpaintPipeline,
StableDiffusionControlNetPipeline,
StableDiffusionDepthaImgPipeline,
StableDiffusionDiffEditPipeline,
StableDiffusionImageVariationPipeline,
StableDiffusionImgaImgPipeline,
StableDiffusionInpaintPipeline,
StableDiffusionInpaintPipelineLegacy,
StableDiffusionInstructPixaPixPipeline,
StableDiffusionLatentUpscalePipeline,
StableDiffusionLDMaDPipeline,
StableDiffusionModelEditingPipeline,
StableDiffusionPanoramaPipeline,
StableDiffusionParadigmsPipeline,
StableDiffusionPipeline,
StableDiffusionPipelineSafe,
StableDiffusionPixaPixZeroPipeline,
StableDiffusionSAGPipeline,
StableDiffusionUpscalePipeline,
StableUnCLIPImgaImgPipeline,
StableUnCLIPPipeline,
TextToVideoSDPipeline,
TextToVideoZeroPipeline,
UnCLIPImageVariationPipeline,
UnCLIPPipeline,
UniDiffuserModel,
UniDiffuserPipeline,
UniDiffuserTextDecoder,
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
VideoToVideoSDPipeline,
VQDiffusionPipeline,
)
try:
if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403
else:
from .pipelines import StableDiffusionXLImgaImgPipeline, StableDiffusionXLPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipelines import StableDiffusionKDiffusionPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403
else:
from .pipelines import (
OnnxStableDiffusionImgaImgPipeline,
OnnxStableDiffusionInpaintPipeline,
OnnxStableDiffusionInpaintPipelineLegacy,
OnnxStableDiffusionPipeline,
OnnxStableDiffusionUpscalePipeline,
StableDiffusionOnnxPipeline,
)
try:
if not (is_torch_available() and is_librosa_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_librosa_objects import * # noqa F403
else:
from .pipelines import AudioDiffusionPipeline, Mel
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .pipelines import SpectrogramDiffusionPipeline
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_objects import * # noqa F403
else:
from .models.controlnet_flax import FlaxControlNetModel
from .models.modeling_flax_utils import FlaxModelMixin
from .models.unet_ad_condition_flax import FlaxUNetaDConditionModel
from .models.vae_flax import FlaxAutoencoderKL
from .pipelines import FlaxDiffusionPipeline
from .schedulers import (
FlaxDDIMScheduler,
FlaxDDPMScheduler,
FlaxDPMSolverMultistepScheduler,
FlaxKarrasVeScheduler,
FlaxLMSDiscreteScheduler,
FlaxPNDMScheduler,
FlaxSchedulerMixin,
FlaxScoreSdeVeScheduler,
)
try:
if not (is_flax_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
FlaxStableDiffusionControlNetPipeline,
FlaxStableDiffusionImgaImgPipeline,
FlaxStableDiffusionInpaintPipeline,
FlaxStableDiffusionPipeline,
)
try:
if not (is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_note_seq_objects import * # noqa F403
else:
from .pipelines import MidiProcessor
"""simple docstring"""
from __future__ import annotations
import math
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> int:
if depth < 0:
raise ValueError('Depth cannot be less than 0' )
if len(UpperCAmelCase ) == 0:
raise ValueError('Scores cannot be empty' )
if depth == height:
return scores[node_index]
if is_max:
return max(
minimax(depth + 1 , node_index * 2 , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) , minimax(depth + 1 , node_index * 2 + 1 , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) , )
return min(
minimax(depth + 1 , node_index * 2 , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) , minimax(depth + 1 , node_index * 2 + 1 , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) , )
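# Worked example for the minimax above: with leaves [90, 23, 6, 33, 21, 65, 123, 34423]
# and height log2(8) = 3, the root maximizer evaluates
# max(min(max(90, 23), max(6, 33)), min(max(21, 65), max(123, 34423))) = max(33, 65) = 65.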
def UpperCAmelCase ( ) -> None:
snake_case_ = [90, 23, 6, 33, 21, 65, 123, 34423]
snake_case_ = math.log(len(UpperCAmelCase ) , 2 )
print('Optimal value : ' , end='' )
print(minimax(0 , 0 , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
from __future__ import annotations
import math
__UpperCamelCase = '''2020.9.26'''
__UpperCamelCase = '''xcodz-dot, cclaus, dhruvmanila'''
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> tuple[float, float]:
if not all(isinstance(UpperCAmelCase , (float, int) ) for val in locals().values() ):
snake_case_ = f'Input values must either be float or int: {list(locals().values() )}'
raise TypeError(UpperCAmelCase )
snake_case_ = ((x * distance) / (z + distance)) * scale
snake_case_ = ((y * distance) / (z + distance)) * scale
return projected_x, projected_y
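# Worked example for the projection above: (x, y, z) = (1.0, 2.0, 3.0) with
# distance = 10.0 and scale = 10.0 gives x' = 1 * 10 / 13 * 10 ≈ 7.6923 and y' ≈ 15.3846.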
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> tuple[float, float, float]:
if not isinstance(UpperCAmelCase , UpperCAmelCase ):
raise TypeError('Axis must be a str' )
snake_case_ = locals()
del input_variables["axis"]
if not all(isinstance(UpperCAmelCase , (float, int) ) for val in input_variables.values() ):
snake_case_ = (
'Input values except axis must either be float or int: '
f'{list(input_variables.values() )}'
)
raise TypeError(UpperCAmelCase )
snake_case_ = (angle % 360) / 450 * 180 / math.pi
if axis == "z":
snake_case_ = x * math.cos(UpperCAmelCase ) - y * math.sin(UpperCAmelCase )
snake_case_ = y * math.cos(UpperCAmelCase ) + x * math.sin(UpperCAmelCase )
snake_case_ = z
elif axis == "x":
snake_case_ = y * math.cos(UpperCAmelCase ) - z * math.sin(UpperCAmelCase )
snake_case_ = z * math.cos(UpperCAmelCase ) + y * math.sin(UpperCAmelCase )
snake_case_ = x
elif axis == "y":
snake_case_ = x * math.cos(UpperCAmelCase ) - z * math.sin(UpperCAmelCase )
snake_case_ = z * math.cos(UpperCAmelCase ) + x * math.sin(UpperCAmelCase )
snake_case_ = y
else:
raise ValueError('not a valid axis, choose one of \'x\', \'y\', \'z\'' )
return new_x, new_y, new_z
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F"""{convert_to_ad(1.0, 2.0, 3.0, 10.0, 10.0) = }""")
print(F"""{rotate(1.0, 2.0, 3.0, "y", 90.0) = }""")
"""simple docstring"""
import argparse
import json
import torch
from diffusers import DDPMScheduler, LDMPipeline, UNetaDModel, VQModel
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase=1 ) -> Optional[Any]:
if n_shave_prefix_segments >= 0:
return ".".join(path.split('.' )[n_shave_prefix_segments:] )
else:
return ".".join(path.split('.' )[:n_shave_prefix_segments] )
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase=0 ) -> Dict:
snake_case_ = []
for old_item in old_list:
snake_case_ = old_item.replace('in_layers.0' , 'norm1' )
snake_case_ = new_item.replace('in_layers.2' , 'conv1' )
snake_case_ = new_item.replace('out_layers.0' , 'norm2' )
snake_case_ = new_item.replace('out_layers.3' , 'conv2' )
snake_case_ = new_item.replace('emb_layers.1' , 'time_emb_proj' )
snake_case_ = new_item.replace('skip_connection' , 'conv_shortcut' )
snake_case_ = shave_segments(UpperCAmelCase , n_shave_prefix_segments=UpperCAmelCase )
mapping.append({'old': old_item, 'new': new_item} )
return mapping
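# Example mapping produced above (with the default shave of 0):
# 'input_blocks.1.0.in_layers.0.weight' is renamed to 'input_blocks.1.0.norm1.weight',
# and 'input_blocks.1.0.emb_layers.1.bias' to 'input_blocks.1.0.time_emb_proj.bias'.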
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase=0 ) -> Union[str, Any]:
snake_case_ = []
for old_item in old_list:
snake_case_ = old_item
snake_case_ = new_item.replace('norm.weight' , 'group_norm.weight' )
snake_case_ = new_item.replace('norm.bias' , 'group_norm.bias' )
snake_case_ = new_item.replace('proj_out.weight' , 'proj_attn.weight' )
snake_case_ = new_item.replace('proj_out.bias' , 'proj_attn.bias' )
snake_case_ = shave_segments(UpperCAmelCase , n_shave_prefix_segments=UpperCAmelCase )
mapping.append({'old': old_item, 'new': new_item} )
return mapping
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=None ) -> Optional[Any]:
assert isinstance(UpperCAmelCase , UpperCAmelCase ), "Paths should be a list of dicts containing 'old' and 'new' keys."
# Splits the attention layers into three variables.
if attention_paths_to_split is not None:
for path, path_map in attention_paths_to_split.items():
snake_case_ = old_checkpoint[path]
snake_case_ = old_tensor.shape[0] // 3
snake_case_ = (-1, channels) if len(old_tensor.shape ) == 3 else (-1)
snake_case_ = old_tensor.shape[0] // config['num_head_channels'] // 3
snake_case_ = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:] )
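            # e.g. a fused qkv weight of shape (3*C, C) is reshaped here to
            # (num_heads, 3*C // num_heads, C) and then split into equal
            # query, key and value chunks along dim=1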
snake_case_ , snake_case_ , snake_case_ = old_tensor.split(channels // num_heads , dim=1 )
snake_case_ = query.reshape(UpperCAmelCase )
snake_case_ = key.reshape(UpperCAmelCase )
snake_case_ = value.reshape(UpperCAmelCase )
for path in paths:
snake_case_ = path['new']
# These have already been assigned
if attention_paths_to_split is not None and new_path in attention_paths_to_split:
continue
# Global renaming happens here
snake_case_ = new_path.replace('middle_block.0' , 'mid_block.resnets.0' )
snake_case_ = new_path.replace('middle_block.1' , 'mid_block.attentions.0' )
snake_case_ = new_path.replace('middle_block.2' , 'mid_block.resnets.1' )
if additional_replacements is not None:
for replacement in additional_replacements:
snake_case_ = new_path.replace(replacement['old'] , replacement['new'] )
# proj_attn.weight has to be converted from conv 1D to linear
if "proj_attn.weight" in new_path:
snake_case_ = old_checkpoint[path['old']][:, :, 0]
else:
snake_case_ = old_checkpoint[path['old']]
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> List[Any]:
snake_case_ = {}
snake_case_ = checkpoint['time_embed.0.weight']
snake_case_ = checkpoint['time_embed.0.bias']
snake_case_ = checkpoint['time_embed.2.weight']
snake_case_ = checkpoint['time_embed.2.bias']
snake_case_ = checkpoint['input_blocks.0.0.weight']
snake_case_ = checkpoint['input_blocks.0.0.bias']
snake_case_ = checkpoint['out.0.weight']
snake_case_ = checkpoint['out.0.bias']
snake_case_ = checkpoint['out.2.weight']
snake_case_ = checkpoint['out.2.bias']
# Retrieves the keys for the input blocks only
snake_case_ = len({'.'.join(layer.split('.' )[:2] ) for layer in checkpoint if 'input_blocks' in layer} )
snake_case_ = {
layer_id: [key for key in checkpoint if f'input_blocks.{layer_id}' in key]
for layer_id in range(UpperCAmelCase )
}
# Retrieves the keys for the middle blocks only
snake_case_ = len({'.'.join(layer.split('.' )[:2] ) for layer in checkpoint if 'middle_block' in layer} )
snake_case_ = {
layer_id: [key for key in checkpoint if f'middle_block.{layer_id}' in key]
for layer_id in range(UpperCAmelCase )
}
# Retrieves the keys for the output blocks only
snake_case_ = len({'.'.join(layer.split('.' )[:2] ) for layer in checkpoint if 'output_blocks' in layer} )
snake_case_ = {
layer_id: [key for key in checkpoint if f'output_blocks.{layer_id}' in key]
for layer_id in range(UpperCAmelCase )
}
for i in range(1 , UpperCAmelCase ):
snake_case_ = (i - 1) // (config['num_res_blocks'] + 1)
snake_case_ = (i - 1) % (config['num_res_blocks'] + 1)
snake_case_ = [key for key in input_blocks[i] if f'input_blocks.{i}.0' in key]
snake_case_ = [key for key in input_blocks[i] if f'input_blocks.{i}.1' in key]
if f'input_blocks.{i}.0.op.weight' in checkpoint:
snake_case_ = checkpoint[
f'input_blocks.{i}.0.op.weight'
]
snake_case_ = checkpoint[
f'input_blocks.{i}.0.op.bias'
]
continue
snake_case_ = renew_resnet_paths(UpperCAmelCase )
snake_case_ = {'old': f'input_blocks.{i}.0', 'new': f'down_blocks.{block_id}.resnets.{layer_in_block_id}'}
snake_case_ = {'old': 'resnets.2.op', 'new': 'downsamplers.0.op'}
assign_to_checkpoint(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , additional_replacements=[meta_path, resnet_op] , config=UpperCAmelCase )
if len(UpperCAmelCase ):
snake_case_ = renew_attention_paths(UpperCAmelCase )
snake_case_ = {
'old': f'input_blocks.{i}.1',
'new': f'down_blocks.{block_id}.attentions.{layer_in_block_id}',
}
snake_case_ = {
f'input_blocks.{i}.1.qkv.bias': {
'key': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias',
'query': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias',
'value': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias',
},
f'input_blocks.{i}.1.qkv.weight': {
'key': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight',
'query': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight',
'value': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight',
},
}
assign_to_checkpoint(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , additional_replacements=[meta_path] , attention_paths_to_split=UpperCAmelCase , config=UpperCAmelCase , )
snake_case_ = middle_blocks[0]
snake_case_ = middle_blocks[1]
snake_case_ = middle_blocks[2]
snake_case_ = renew_resnet_paths(UpperCAmelCase )
assign_to_checkpoint(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , config=UpperCAmelCase )
snake_case_ = renew_resnet_paths(UpperCAmelCase )
assign_to_checkpoint(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , config=UpperCAmelCase )
snake_case_ = renew_attention_paths(UpperCAmelCase )
snake_case_ = {
'middle_block.1.qkv.bias': {
'key': 'mid_block.attentions.0.key.bias',
'query': 'mid_block.attentions.0.query.bias',
'value': 'mid_block.attentions.0.value.bias',
},
'middle_block.1.qkv.weight': {
'key': 'mid_block.attentions.0.key.weight',
'query': 'mid_block.attentions.0.query.weight',
'value': 'mid_block.attentions.0.value.weight',
},
}
assign_to_checkpoint(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , attention_paths_to_split=UpperCAmelCase , config=UpperCAmelCase )
for i in range(UpperCAmelCase ):
snake_case_ = i // (config['num_res_blocks'] + 1)
snake_case_ = i % (config['num_res_blocks'] + 1)
snake_case_ = [shave_segments(UpperCAmelCase , 2 ) for name in output_blocks[i]]
snake_case_ = {}
for layer in output_block_layers:
snake_case_ , snake_case_ = layer.split('.' )[0], shave_segments(UpperCAmelCase , 1 )
if layer_id in output_block_list:
output_block_list[layer_id].append(UpperCAmelCase )
else:
snake_case_ = [layer_name]
if len(UpperCAmelCase ) > 1:
snake_case_ = [key for key in output_blocks[i] if f'output_blocks.{i}.0' in key]
snake_case_ = [key for key in output_blocks[i] if f'output_blocks.{i}.1' in key]
snake_case_ = renew_resnet_paths(UpperCAmelCase )
snake_case_ = renew_resnet_paths(UpperCAmelCase )
snake_case_ = {'old': f'output_blocks.{i}.0', 'new': f'up_blocks.{block_id}.resnets.{layer_in_block_id}'}
assign_to_checkpoint(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , additional_replacements=[meta_path] , config=UpperCAmelCase )
if ["conv.weight", "conv.bias"] in output_block_list.values():
snake_case_ = list(output_block_list.values() ).index(['conv.weight', 'conv.bias'] )
snake_case_ = checkpoint[
f'output_blocks.{i}.{index}.conv.weight'
]
snake_case_ = checkpoint[
f'output_blocks.{i}.{index}.conv.bias'
]
# Clear attentions as they have been attributed above.
if len(UpperCAmelCase ) == 2:
snake_case_ = []
if len(UpperCAmelCase ):
snake_case_ = renew_attention_paths(UpperCAmelCase )
snake_case_ = {
'old': f'output_blocks.{i}.1',
'new': f'up_blocks.{block_id}.attentions.{layer_in_block_id}',
}
snake_case_ = {
f'output_blocks.{i}.1.qkv.bias': {
'key': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias',
'query': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias',
'value': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias',
},
f'output_blocks.{i}.1.qkv.weight': {
'key': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight',
'query': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight',
'value': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight',
},
}
assign_to_checkpoint(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , additional_replacements=[meta_path] , attention_paths_to_split=to_split if any('qkv' in key for key in attentions ) else None , config=UpperCAmelCase , )
else:
snake_case_ = renew_resnet_paths(UpperCAmelCase , n_shave_prefix_segments=1 )
for path in resnet_0_paths:
snake_case_ = '.'.join(['output_blocks', str(UpperCAmelCase ), path['old']] )
snake_case_ = '.'.join(['up_blocks', str(UpperCAmelCase ), 'resnets', str(UpperCAmelCase ), path['new']] )
snake_case_ = checkpoint[old_path]
return new_checkpoint
if __name__ == "__main__":
__UpperCamelCase = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the architecture.''',
)
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
__UpperCamelCase = parser.parse_args()
__UpperCamelCase = torch.load(args.checkpoint_path)
with open(args.config_file) as f:
__UpperCamelCase = json.loads(f.read())
__UpperCamelCase = convert_ldm_checkpoint(checkpoint, config)
if "ldm" in config:
del config["ldm"]
__UpperCamelCase = UNetaDModel(**config)
model.load_state_dict(converted_checkpoint)
try:
__UpperCamelCase = DDPMScheduler.from_config('''/'''.join(args.checkpoint_path.split('''/''')[:-1]))
__UpperCamelCase = VQModel.from_pretrained('''/'''.join(args.checkpoint_path.split('''/''')[:-1]))
__UpperCamelCase = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae)
pipe.save_pretrained(args.dump_path)
except: # noqa: E722
model.save_pretrained(args.dump_path)
from binascii import hexlify
from hashlib import shaaaa
from os import urandom
# RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for
# Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526
__UpperCamelCase = {
# 1536-bit
5: {
'''prime''': int(
'''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'''
+ '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD'''
+ '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'''
+ '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'''
+ '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'''
+ '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'''
+ '''83655D23DCA3AD961C62F356208552BB9ED529077096966D'''
+ '''670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF''',
base=16,
),
'''generator''': 2,
},
# 2048-bit
14: {
'''prime''': int(
'''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'''
+ '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD'''
+ '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'''
+ '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'''
+ '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'''
+ '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'''
+ '''83655D23DCA3AD961C62F356208552BB9ED529077096966D'''
+ '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'''
+ '''E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'''
+ '''DE2BCBF6955817183995497CEA956AE515D2261898FA0510'''
+ '''15728E5A8AACAA68FFFFFFFFFFFFFFFF''',
base=16,
),
'''generator''': 2,
},
# 3072-bit
15: {
'''prime''': int(
'''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'''
+ '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD'''
+ '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'''
+ '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'''
+ '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'''
+ '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'''
+ '''83655D23DCA3AD961C62F356208552BB9ED529077096966D'''
+ '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'''
+ '''E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'''
+ '''DE2BCBF6955817183995497CEA956AE515D2261898FA0510'''
+ '''15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'''
+ '''ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'''
+ '''ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'''
+ '''F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'''
+ '''BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'''
+ '''43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF''',
base=16,
),
'''generator''': 2,
},
# 4096-bit
16: {
'''prime''': int(
'''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'''
+ '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD'''
+ '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'''
+ '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'''
+ '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'''
+ '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'''
+ '''83655D23DCA3AD961C62F356208552BB9ED529077096966D'''
+ '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'''
+ '''E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'''
+ '''DE2BCBF6955817183995497CEA956AE515D2261898FA0510'''
+ '''15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'''
+ '''ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'''
+ '''ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'''
+ '''F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'''
+ '''BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'''
+ '''43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7'''
+ '''88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA'''
+ '''2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6'''
+ '''287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED'''
+ '''1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9'''
+ '''93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199'''
+ '''FFFFFFFFFFFFFFFF''',
base=16,
),
'''generator''': 2,
},
# 6144-bit
17: {
'''prime''': int(
'''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08'''
+ '''8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B'''
+ '''302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9'''
+ '''A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6'''
+ '''49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8'''
+ '''FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D'''
+ '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C'''
+ '''180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718'''
+ '''3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D'''
+ '''04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D'''
+ '''B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226'''
+ '''1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C'''
+ '''BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC'''
+ '''E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26'''
+ '''99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB'''
+ '''04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2'''
+ '''233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127'''
+ '''D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492'''
+ '''36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406'''
+ '''AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918'''
+ '''DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151'''
+ '''2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03'''
+ '''F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F'''
+ '''BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA'''
+ '''CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B'''
+ '''B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632'''
+ '''387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E'''
+ '''6DCC4024FFFFFFFFFFFFFFFF''',
base=16,
),
'''generator''': 2,
},
# 8192-bit
18: {
'''prime''': int(
'''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'''
+ '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD'''
+ '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'''
+ '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'''
+ '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'''
+ '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'''
+ '''83655D23DCA3AD961C62F356208552BB9ED529077096966D'''
+ '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'''
+ '''E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'''
+ '''DE2BCBF6955817183995497CEA956AE515D2261898FA0510'''
+ '''15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'''
+ '''ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'''
+ '''ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'''
+ '''F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'''
+ '''BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'''
+ '''43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7'''
+ '''88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA'''
+ '''2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6'''
+ '''287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED'''
+ '''1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9'''
+ '''93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492'''
+ '''36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD'''
+ '''F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831'''
+ '''179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B'''
+ '''DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF'''
+ '''5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6'''
+ '''D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3'''
+ '''23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA'''
+ '''CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328'''
+ '''06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C'''
+ '''DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE'''
+ '''12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4'''
+ '''38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300'''
+ '''741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568'''
+ '''3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9'''
+ '''22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B'''
+ '''4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A'''
+ '''062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36'''
+ '''4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1'''
+ '''B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92'''
+ '''4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47'''
+ '''9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71'''
+ '''60C980DD98EDD3DFFFFFFFFFFFFFFFFF''',
base=16,
),
'''generator''': 2,
},
}
class UpperCamelCase :
def __init__( self, lowerCAmelCase__ = 14) -> None:
if group not in primes:
raise ValueError('Unsupported Group')
snake_case_ = primes[group]['prime']
snake_case_ = primes[group]['generator']
snake_case_ = int(hexlify(urandom(32)), base=16)
def a_ ( self) -> str:
return hex(self.__private_key)[2:]
def a_ ( self) -> str:
snake_case_ = pow(self.generator, self.__private_key, self.prime)
return hex(lowerCAmelCase__)[2:]
def a_ ( self, lowerCAmelCase__) -> bool:
# check if the other public key is valid based on NIST SP800-56
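        # (the pow(key, (p - 1) // 2, p) == 1 test checks membership in the prime-order
        # subgroup of quadratic residues, since these MODP primes are safe primes)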
return (
2 <= key <= self.prime - 2
and pow(lowerCAmelCase__, (self.prime - 1) // 2, self.prime) == 1
)
def a_ ( self, lowerCAmelCase__) -> str:
snake_case_ = int(lowerCAmelCase__, base=16)
if not self.is_valid_public_key(lowerCAmelCase__):
raise ValueError('Invalid public key')
snake_case_ = pow(lowerCAmelCase__, self.__private_key, self.prime)
return shaaaa(str(lowerCAmelCase__).encode()).hexdigest()
@staticmethod
def a_ ( lowerCAmelCase__, lowerCAmelCase__) -> bool:
# check if the other public key is valid based on NIST SP800-56
return (
2 <= remote_public_key_str <= prime - 2
and pow(lowerCAmelCase__, (prime - 1) // 2, lowerCAmelCase__) == 1
)
@staticmethod
def a_ ( lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__ = 14) -> str:
snake_case_ = int(lowerCAmelCase__, base=16)
snake_case_ = int(lowerCAmelCase__, base=16)
snake_case_ = primes[group]['prime']
if not DiffieHellman.is_valid_public_key_static(lowerCAmelCase__, lowerCAmelCase__):
raise ValueError('Invalid public key')
snake_case_ = pow(lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__)
return shaaaa(str(lowerCAmelCase__).encode()).hexdigest()
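# Protocol sketch (hedged summary of the class above): each party publishes
# pow(generator, private_key, prime) as a hex string; both sides then compute the
# same pow(peer_public, own_private, prime) and hash it (SHA-256) before use.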
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
def UpperCAmelCase ( UpperCAmelCase ) -> Dict:
# vision encoder
if "img_encoder.pos_embed" in name:
snake_case_ = name.replace('img_encoder.pos_embed' , 'vision_model.embeddings.position_embeddings' )
if "img_encoder.patch_embed.proj" in name:
snake_case_ = name.replace('img_encoder.patch_embed.proj' , 'vision_model.embeddings.patch_embeddings.projection' )
if "img_encoder.patch_embed.norm" in name:
snake_case_ = name.replace('img_encoder.patch_embed.norm' , 'vision_model.embeddings.layernorm' )
if "img_encoder.layers" in name:
snake_case_ = name.replace('img_encoder.layers' , 'vision_model.encoder.stages' )
if "blocks" in name and "res" not in name:
snake_case_ = name.replace('blocks' , 'layers' )
if "attn" in name and "pre_assign" not in name:
snake_case_ = name.replace('attn' , 'self_attn' )
if "proj" in name and "self_attn" in name and "text" not in name:
snake_case_ = name.replace('proj' , 'out_proj' )
if "pre_assign_attn.attn.proj" in name:
snake_case_ = name.replace('pre_assign_attn.attn.proj' , 'pre_assign_attn.attn.out_proj' )
if "norm1" in name:
snake_case_ = name.replace('norm1' , 'layer_norm1' )
if "norm2" in name and "pre_assign" not in name:
snake_case_ = name.replace('norm2' , 'layer_norm2' )
if "img_encoder.norm" in name:
snake_case_ = name.replace('img_encoder.norm' , 'vision_model.layernorm' )
# text encoder
if "text_encoder.token_embedding" in name:
snake_case_ = name.replace('text_encoder.token_embedding' , 'text_model.embeddings.token_embedding' )
if "text_encoder.positional_embedding" in name:
snake_case_ = name.replace('text_encoder.positional_embedding' , 'text_model.embeddings.position_embedding.weight' )
if "text_encoder.transformer.resblocks." in name:
snake_case_ = name.replace('text_encoder.transformer.resblocks.' , 'text_model.encoder.layers.' )
if "ln_1" in name:
snake_case_ = name.replace('ln_1' , 'layer_norm1' )
if "ln_2" in name:
snake_case_ = name.replace('ln_2' , 'layer_norm2' )
if "c_fc" in name:
snake_case_ = name.replace('c_fc' , 'fc1' )
if "c_proj" in name:
snake_case_ = name.replace('c_proj' , 'fc2' )
if "text_encoder" in name:
snake_case_ = name.replace('text_encoder' , 'text_model' )
if "ln_final" in name:
snake_case_ = name.replace('ln_final' , 'final_layer_norm' )
# projection layers
if "img_projector.linear_hidden." in name:
snake_case_ = name.replace('img_projector.linear_hidden.' , 'visual_projection.' )
if "img_projector.linear_out." in name:
snake_case_ = name.replace('img_projector.linear_out.' , 'visual_projection.3.' )
if "text_projector.linear_hidden" in name:
snake_case_ = name.replace('text_projector.linear_hidden' , 'text_projection' )
if "text_projector.linear_out" in name:
snake_case_ = name.replace('text_projector.linear_out' , 'text_projection.3' )
return name
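# Example of the mapping above: the checkpoint key
# 'img_encoder.layers.0.blocks.0.norm1.weight' is rewritten step by step to
# 'vision_model.encoder.stages.0.layers.0.layer_norm1.weight'.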
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> Optional[Any]:
for key in orig_state_dict.copy().keys():
snake_case_ = orig_state_dict.pop(UpperCAmelCase )
if "qkv" in key:
# weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
snake_case_ = key.split('.' )
snake_case_ , snake_case_ = int(key_split[2] ), int(key_split[4] )
snake_case_ = config.vision_config.hidden_size
if "weight" in key:
snake_case_ = val[:dim, :]
snake_case_ = val[dim : dim * 2, :]
snake_case_ = val[-dim:, :]
else:
snake_case_ = val[:dim]
snake_case_ = val[dim : dim * 2]
snake_case_ = val[-dim:]
elif "in_proj" in key:
# weights and biases of the key, value and query projections of text encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
snake_case_ = key.split('.' )
snake_case_ = int(key_split[3] )
snake_case_ = config.text_config.hidden_size
if "weight" in key:
snake_case_ = val[:dim, :]
snake_case_ = val[
dim : dim * 2, :
]
snake_case_ = val[-dim:, :]
else:
snake_case_ = val[:dim]
snake_case_ = val[dim : dim * 2]
snake_case_ = val[-dim:]
else:
snake_case_ = rename_key(UpperCAmelCase )
# squeeze if necessary
if (
"text_projection.0" in new_name
or "text_projection.3" in new_name
or "visual_projection.0" in new_name
or "visual_projection.3" in new_name
):
snake_case_ = val.squeeze_()
else:
snake_case_ = val
return orig_state_dict
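# A minimal, self-contained sketch of the qkv split performed above, assuming
# a fused projection of shape (3 * dim, dim) with query, key and value stacked
# along dim 0 (illustrative size, not the real config):
#
#   import torch
#   dim = 4
#   fused = torch.randn(3 * dim, dim)
#   q, k, v = fused[:dim, :], fused[dim : dim * 2, :], fused[-dim:, :]
#   assert torch.equal(torch.cat([q, k, v], dim=0), fused)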
def UpperCAmelCase ( ) -> Any:
snake_case_ = 'http://images.cocodataset.org/val2017/000000039769.jpg'
snake_case_ = Image.open(requests.get(UpperCAmelCase , stream=UpperCAmelCase ).raw )
return im
@torch.no_grad()
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase="groupvit-gcc-yfcc" , UpperCAmelCase=False ) -> int:
snake_case_ = GroupViTConfig()
snake_case_ = GroupViTModel(UpperCAmelCase ).eval()
snake_case_ = torch.load(UpperCAmelCase , map_location='cpu' )['model']
snake_case_ = convert_state_dict(UpperCAmelCase , UpperCAmelCase )
snake_case_ , snake_case_ = model.load_state_dict(UpperCAmelCase , strict=UpperCAmelCase )
assert missing_keys == ["text_model.embeddings.position_ids"]
assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(UpperCAmelCase ) == 0)
# verify result
snake_case_ = CLIPProcessor.from_pretrained('openai/clip-vit-base-patch32' )
snake_case_ = prepare_img()
snake_case_ = processor(text=['a photo of a cat', 'a photo of a dog'] , images=UpperCAmelCase , padding=UpperCAmelCase , return_tensors='pt' )
with torch.no_grad():
snake_case_ = model(**UpperCAmelCase )
if model_name == "groupvit-gcc-yfcc":
snake_case_ = torch.tensor([[13.3_523, 6.3_629]] )
elif model_name == "groupvit-gcc-redcaps":
snake_case_ = torch.tensor([[16.1_873, 8.6_230]] )
else:
raise ValueError(f'Model name {model_name} not supported.' )
assert torch.allclose(outputs.logits_per_image , UpperCAmelCase , atol=1e-3 )
processor.save_pretrained(UpperCAmelCase )
model.save_pretrained(UpperCAmelCase )
print('Successfully saved processor and model to' , UpperCAmelCase )
if push_to_hub:
print('Pushing to the hub...' )
processor.push_to_hub(UpperCAmelCase , organization='nielsr' )
model.push_to_hub(UpperCAmelCase , organization='nielsr' )
if __name__ == "__main__":
__UpperCamelCase = argparse.ArgumentParser()
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to dump the processor and PyTorch model.'''
)
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to GroupViT checkpoint''')
parser.add_argument(
'''--model_name''',
default='''groupvit-gcc-yfcc''',
type=str,
help='''Name of the model. Expecting either \'groupvit-gcc-yfcc\' or \'groupvit-gcc-redcaps\'''',
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.''',
)
__UpperCamelCase = parser.parse_args()
convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 312
| 0
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCamelCase = logging.get_logger(__name__)
__UpperCamelCase = {
'''weiweishi/roc-bert-base-zh''': '''https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json''',
}
class UpperCamelCase ( lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE_ = "roc_bert"
def __init__( self, lowerCAmelCase__=3_0522, lowerCAmelCase__=768, lowerCAmelCase__=12, lowerCAmelCase__=12, lowerCAmelCase__=3072, lowerCAmelCase__="gelu", lowerCAmelCase__=0.1, lowerCAmelCase__=0.1, lowerCAmelCase__=512, lowerCAmelCase__=2, lowerCAmelCase__=0.02, lowerCAmelCase__=1e-12, lowerCAmelCase__=True, lowerCAmelCase__=0, lowerCAmelCase__="absolute", lowerCAmelCase__=None, lowerCAmelCase__=True, lowerCAmelCase__=True, lowerCAmelCase__=768, lowerCAmelCase__=910, lowerCAmelCase__=512, lowerCAmelCase__=2_4858, lowerCAmelCase__=True, **lowerCAmelCase__, ) -> int:
snake_case_ = vocab_size
snake_case_ = max_position_embeddings
snake_case_ = hidden_size
snake_case_ = num_hidden_layers
snake_case_ = num_attention_heads
snake_case_ = intermediate_size
snake_case_ = hidden_act
snake_case_ = hidden_dropout_prob
snake_case_ = attention_probs_dropout_prob
snake_case_ = initializer_range
snake_case_ = type_vocab_size
snake_case_ = layer_norm_eps
snake_case_ = use_cache
snake_case_ = enable_pronunciation
snake_case_ = enable_shape
snake_case_ = pronunciation_embed_dim
snake_case_ = pronunciation_vocab_size
snake_case_ = shape_embed_dim
snake_case_ = shape_vocab_size
snake_case_ = concat_input
snake_case_ = position_embedding_type
snake_case_ = classifier_dropout
super().__init__(pad_token_id=lowerCAmelCase__, **lowerCAmelCase__)
| 364
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__UpperCamelCase = {'''configuration_mmbt''': ['''MMBTConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase = ['''MMBTForClassification''', '''MMBTModel''', '''ModalEmbeddings''']
if TYPE_CHECKING:
from .configuration_mmbt import MMBTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings
else:
import sys
__UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 312
| 0
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__UpperCamelCase = logging.get_logger(__name__)
__UpperCamelCase = {
'''microsoft/resnet-50''': '''https://huggingface.co/microsoft/resnet-50/blob/main/config.json''',
}
class UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE_ = "resnet"
SCREAMING_SNAKE_CASE_ = ["basic", "bottleneck"]
def __init__( self, lowerCAmelCase__=3, lowerCAmelCase__=64, lowerCAmelCase__=[256, 512, 1024, 2048], lowerCAmelCase__=[3, 4, 6, 3], lowerCAmelCase__="bottleneck", lowerCAmelCase__="relu", lowerCAmelCase__=False, lowerCAmelCase__=None, lowerCAmelCase__=None, **lowerCAmelCase__, ) -> Dict:
super().__init__(**lowerCAmelCase__)
if layer_type not in self.layer_types:
raise ValueError(f'layer_type={layer_type} is not one of {",".join(self.layer_types)}')
snake_case_ = num_channels
snake_case_ = embedding_size
snake_case_ = hidden_sizes
snake_case_ = depths
snake_case_ = layer_type
snake_case_ = hidden_act
snake_case_ = downsample_in_first_stage
snake_case_ = ['stem'] + [f'stage{idx}' for idx in range(1, len(lowerCAmelCase__) + 1)]
snake_case_ , snake_case_ = get_aligned_output_features_output_indices(
out_features=lowerCAmelCase__, out_indices=lowerCAmelCase__, stage_names=self.stage_names)
class UpperCamelCase ( lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE_ = version.parse("1.11" )
@property
def a_ ( self) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
])
@property
def a_ ( self) -> float:
return 1e-3
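# Example instantiation, assuming the original public names this dump renames
# (ResNetConfig and its keyword arguments):
#
#   config = ResNetConfig(depths=[3, 4, 6, 3], layer_type="bottleneck")
#   config.stage_names  # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']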
| 365
|
"""simple docstring"""
from __future__ import annotations
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> list[str]:
if partitions <= 0:
raise ValueError('partitions must be a positive number!' )
if partitions > number_of_bytes:
raise ValueError('partitions can not > number_of_bytes!' )
snake_case_ = number_of_bytes // partitions
snake_case_ = []
for i in range(UpperCAmelCase ):
snake_case_ = i * bytes_per_partition + 1
snake_case_ = (
number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition
)
allocation_list.append(f'{start_bytes}-{end_bytes}' )
return allocation_list
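# Worked example of the allocation above: 16647 bytes across 4 partitions
# gives 16647 // 4 = 4161 bytes per partition, and the last partition absorbs
# the remainder: ['1-4161', '4162-8322', '8323-12483', '12484-16647'].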
if __name__ == "__main__":
import doctest
doctest.testmod()
| 312
| 0
|
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_poolformer import PoolFormerImageProcessor
__UpperCamelCase = logging.get_logger(__name__)
class UpperCamelCase ( lowerCAmelCase__ ):
def __init__( self, *lowerCAmelCase__, **lowerCAmelCase__) -> None:
warnings.warn(
'The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
' Please use PoolFormerImageProcessor instead.', lowerCAmelCase__, )
super().__init__(*lowerCAmelCase__, **lowerCAmelCase__)
| 366
|
"""simple docstring"""
__UpperCamelCase = 256
# Modulus to hash a string
__UpperCamelCase = 100_0003
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> bool:
snake_case_ = len(UpperCAmelCase )
snake_case_ = len(UpperCAmelCase )
if p_len > t_len:
return False
snake_case_ = 0
snake_case_ = 0
snake_case_ = 1
# Calculating the hash of pattern and substring of text
for i in range(UpperCAmelCase ):
snake_case_ = (ord(pattern[i] ) + p_hash * alphabet_size) % modulus
snake_case_ = (ord(text[i] ) + text_hash * alphabet_size) % modulus
if i == p_len - 1:
continue
snake_case_ = (modulus_power * alphabet_size) % modulus
for i in range(0 , t_len - p_len + 1 ):
if text_hash == p_hash and text[i : i + p_len] == pattern:
return True
if i == t_len - p_len:
continue
# Calculate the rolling hash (see https://en.wikipedia.org/wiki/Rolling_hash)
snake_case_ = (
(text_hash - ord(text[i] ) * modulus_power) * alphabet_size
+ ord(text[i + p_len] )
) % modulus
return False
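# A tiny worked example of the rolling-hash update used above, with digit
# values instead of ord codes and illustrative constants alphabet_size = 10,
# modulus = 97:
#   hash("12") = (1 * 10 + 2) % 97 = 12
#   sliding from "12" to "23": ((12 - 1 * 10) * 10 + 3) % 97 = 23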
def UpperCAmelCase ( ) -> None:
# Test 1)
snake_case_ = 'abc1abc12'
snake_case_ = 'alskfjaldsabc1abc1abc12k23adsfabcabc'
snake_case_ = 'alskfjaldsk23adsfabcabc'
assert rabin_karp(UpperCAmelCase , UpperCAmelCase ) and not rabin_karp(UpperCAmelCase , UpperCAmelCase )
# Test 2)
snake_case_ = 'ABABX'
snake_case_ = 'ABABZABABYABABX'
assert rabin_karp(UpperCAmelCase , UpperCAmelCase )
# Test 3)
snake_case_ = 'AAAB'
snake_case_ = 'ABAAAAAB'
assert rabin_karp(UpperCAmelCase , UpperCAmelCase )
# Test 4)
snake_case_ = 'abcdabcy'
snake_case_ = 'abcxabcdabxabcdabcdabcy'
assert rabin_karp(UpperCAmelCase , UpperCAmelCase )
# Test 5)
snake_case_ = 'Lü'
snake_case_ = 'Lüsai'
assert rabin_karp(UpperCAmelCase , UpperCAmelCase )
snake_case_ = 'Lue'
assert not rabin_karp(UpperCAmelCase , UpperCAmelCase )
print('Success.' )
if __name__ == "__main__":
test_rabin_karp()
| 312
| 0
|
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCamelCase = logging.get_logger(__name__)
def UpperCAmelCase ( UpperCAmelCase ) -> Dict:
snake_case_ = 'huggingface/label-files'
snake_case_ = 'imagenet-1k-id2label.json'
snake_case_ = json.load(open(hf_hub_download(UpperCAmelCase , UpperCAmelCase , repo_type='dataset' ) , 'r' ) )
snake_case_ = {int(UpperCAmelCase ): v for k, v in idalabel.items()}
snake_case_ = {v: k for k, v in idalabel.items()}
snake_case_ = 'std_conv' if 'bit' in model_name else False
# note that when using BiT as backbone for ViT-hybrid checkpoints,
# one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
# config.conv_layer = "std_conv_same"
snake_case_ = BitConfig(
conv_layer=UpperCAmelCase , num_labels=1000 , idalabel=UpperCAmelCase , labelaid=UpperCAmelCase , )
return config
def UpperCAmelCase ( UpperCAmelCase ) -> List[str]:
if "stem.conv" in name:
snake_case_ = name.replace('stem.conv' , 'bit.embedder.convolution' )
if "blocks" in name:
snake_case_ = name.replace('blocks' , 'layers' )
if "head.fc" in name:
snake_case_ = name.replace('head.fc' , 'classifier.1' )
if name.startswith('norm' ):
snake_case_ = 'bit.' + name
if "bit" not in name and "classifier" not in name:
snake_case_ = 'bit.encoder.' + name
return name
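# Example of the renaming above: 'stem.conv.weight' becomes
# 'bit.embedder.convolution.weight', while 'blocks.0.conv1.weight' becomes
# 'bit.encoder.layers.0.conv1.weight' via the "bit.encoder." fallback.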
def UpperCAmelCase ( ) -> List[str]:
snake_case_ = 'http://images.cocodataset.org/val2017/000000039769.jpg'
snake_case_ = Image.open(requests.get(UpperCAmelCase , stream=UpperCAmelCase ).raw )
return im
@torch.no_grad()
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=False ) -> Any:
snake_case_ = get_config(UpperCAmelCase )
# load original model from timm
snake_case_ = create_model(UpperCAmelCase , pretrained=UpperCAmelCase )
timm_model.eval()
# load state_dict of original model
snake_case_ = timm_model.state_dict()
for key in state_dict.copy().keys():
snake_case_ = state_dict.pop(UpperCAmelCase )
snake_case_ = val.squeeze() if 'head' in key else val
# load HuggingFace model
snake_case_ = BitForImageClassification(UpperCAmelCase )
model.eval()
model.load_state_dict(UpperCAmelCase )
# create image processor
snake_case_ = create_transform(**resolve_data_config({} , model=UpperCAmelCase ) )
snake_case_ = transform.transforms
snake_case_ = {
'bilinear': PILImageResampling.BILINEAR,
'bicubic': PILImageResampling.BICUBIC,
'nearest': PILImageResampling.NEAREST,
}
snake_case_ = BitImageProcessor(
do_resize=UpperCAmelCase , size={'shortest_edge': timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=UpperCAmelCase , crop_size={'height': timm_transforms[1].size[0], 'width': timm_transforms[1].size[1]} , do_normalize=UpperCAmelCase , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , )
snake_case_ = prepare_img()
snake_case_ = transform(UpperCAmelCase ).unsqueeze(0 )
snake_case_ = processor(UpperCAmelCase , return_tensors='pt' ).pixel_values
# verify pixel values
assert torch.allclose(UpperCAmelCase , UpperCAmelCase )
# verify logits
with torch.no_grad():
snake_case_ = model(UpperCAmelCase )
snake_case_ = outputs.logits
print('Logits:' , logits[0, :3] )
print('Predicted class:' , model.config.idalabel[logits.argmax(-1 ).item()] )
snake_case_ = timm_model(UpperCAmelCase )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(UpperCAmelCase , outputs.logits , atol=1e-3 )
print('Looks ok!' )
if pytorch_dump_folder_path is not None:
Path(UpperCAmelCase ).mkdir(exist_ok=UpperCAmelCase )
print(f'Saving model {model_name} and processor to {pytorch_dump_folder_path}' )
model.save_pretrained(UpperCAmelCase )
processor.save_pretrained(UpperCAmelCase )
if push_to_hub:
print(f'Pushing model {model_name} and processor to the hub' )
model.push_to_hub(f'ybelkada/{model_name}' )
processor.push_to_hub(f'ybelkada/{model_name}' )
if __name__ == "__main__":
__UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''resnetv2_50x1_bitm''',
type=str,
help='''Name of the BiT timm model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model to the hub.''',
)
__UpperCamelCase = parser.parse_args()
convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
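# Example invocation (the script filename is assumed for illustration):
#   python convert_bit_to_pytorch.py --model_name resnetv2_50x1_bitm \
#       --pytorch_dump_folder_path ./bit-50 --push_to_hub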
| 367
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__UpperCamelCase = logging.get_logger(__name__)
__UpperCamelCase = {
'''microsoft/resnet-50''': '''https://huggingface.co/microsoft/resnet-50/blob/main/config.json''',
}
class UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE_ = "resnet"
SCREAMING_SNAKE_CASE_ = ["basic", "bottleneck"]
def __init__( self, lowerCAmelCase__=3, lowerCAmelCase__=64, lowerCAmelCase__=[256, 512, 1024, 2048], lowerCAmelCase__=[3, 4, 6, 3], lowerCAmelCase__="bottleneck", lowerCAmelCase__="relu", lowerCAmelCase__=False, lowerCAmelCase__=None, lowerCAmelCase__=None, **lowerCAmelCase__, ) -> Dict:
super().__init__(**lowerCAmelCase__)
if layer_type not in self.layer_types:
raise ValueError(f'layer_type={layer_type} is not one of {",".join(self.layer_types)}')
snake_case_ = num_channels
snake_case_ = embedding_size
snake_case_ = hidden_sizes
snake_case_ = depths
snake_case_ = layer_type
snake_case_ = hidden_act
snake_case_ = downsample_in_first_stage
snake_case_ = ['stem'] + [f'stage{idx}' for idx in range(1, len(lowerCAmelCase__) + 1)]
snake_case_ , snake_case_ = get_aligned_output_features_output_indices(
out_features=lowerCAmelCase__, out_indices=lowerCAmelCase__, stage_names=self.stage_names)
class UpperCamelCase ( lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE_ = version.parse("1.11" )
@property
def a_ ( self) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
])
@property
def a_ ( self) -> float:
return 1e-3
| 312
| 0
|
"""simple docstring"""
from transformers import BertTokenizerFast
from .custom_tokenization import CustomTokenizer
class UpperCamelCase ( lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE_ = CustomTokenizer
pass
| 368
|
"""simple docstring"""
import unittest
from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
__UpperCamelCase = get_tests_dir('''fixtures/spiece.model''')
@require_sentencepiece
@require_tokenizers
class UpperCamelCase ( lowerCAmelCase__ , unittest.TestCase ):
SCREAMING_SNAKE_CASE_ = DebertaVaTokenizer
SCREAMING_SNAKE_CASE_ = DebertaVaTokenizerFast
SCREAMING_SNAKE_CASE_ = True
SCREAMING_SNAKE_CASE_ = True
def a_ ( self) -> int:
super().setUp()
# We have a SentencePiece fixture for testing
snake_case_ = DebertaVaTokenizer(lowerCAmelCase__, unk_token='<unk>')
tokenizer.save_pretrained(self.tmpdirname)
def a_ ( self, lowerCAmelCase__) -> Any:
snake_case_ = 'this is a test'
snake_case_ = 'this is a test'
return input_text, output_text
def a_ ( self) -> Optional[int]:
snake_case_ = '<pad>'
snake_case_ = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCAmelCase__), lowerCAmelCase__)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCAmelCase__), lowerCAmelCase__)
def a_ ( self) -> Tuple:
snake_case_ = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0], '<pad>')
self.assertEqual(vocab_keys[1], '<unk>')
self.assertEqual(vocab_keys[-1], '[PAD]')
self.assertEqual(len(lowerCAmelCase__), 3_0001)
def a_ ( self) -> Dict:
self.assertEqual(self.get_tokenizer().vocab_size, 3_0000)
def a_ ( self) -> List[str]:
# fmt: off
snake_case_ = ' \tHeLLo!how \n Are yoU? '
snake_case_ = ['▁hello', '!', 'how', '▁are', '▁you', '?']
# fmt: on
snake_case_ = DebertaVaTokenizer(lowerCAmelCase__, do_lower_case=lowerCAmelCase__)
snake_case_ = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__))
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = DebertaVaTokenizerFast(lowerCAmelCase__, do_lower_case=lowerCAmelCase__)
snake_case_ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__))
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
@unittest.skip('There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.')
def a_ ( self) -> str:
pass
@unittest.skip('There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.')
def a_ ( self) -> List[Any]:
pass
def a_ ( self) -> str:
# fmt: off
snake_case_ = 'I was born in 92000, and this is falsé.'
snake_case_ = ['▁', '<unk>', '▁was', '▁born', '▁in', '▁9', '2000', '▁', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '▁', '.', ]
# fmt: on
snake_case_ = DebertaVaTokenizer(lowerCAmelCase__, split_by_punct=lowerCAmelCase__)
snake_case_ = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__))
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = DebertaVaTokenizerFast(lowerCAmelCase__, split_by_punct=lowerCAmelCase__)
snake_case_ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__))
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
def a_ ( self) -> List[Any]:
# fmt: off
snake_case_ = 'I was born in 92000, and this is falsé.'
snake_case_ = ['▁i', '▁was', '▁born', '▁in', '▁9', '2000', '▁', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '▁', '.', ]
# fmt: on
snake_case_ = DebertaVaTokenizer(lowerCAmelCase__, do_lower_case=lowerCAmelCase__, split_by_punct=lowerCAmelCase__)
snake_case_ = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__))
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = DebertaVaTokenizerFast(lowerCAmelCase__, do_lower_case=lowerCAmelCase__, split_by_punct=lowerCAmelCase__)
snake_case_ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__))
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
def a_ ( self) -> Dict:
# fmt: off
snake_case_ = 'I was born in 92000, and this is falsé.'
snake_case_ = ['▁i', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '.', ]
# fmt: on
snake_case_ = DebertaVaTokenizer(lowerCAmelCase__, do_lower_case=lowerCAmelCase__, split_by_punct=lowerCAmelCase__)
snake_case_ = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__))
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = DebertaVaTokenizerFast(lowerCAmelCase__, do_lower_case=lowerCAmelCase__, split_by_punct=lowerCAmelCase__)
snake_case_ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__))
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
def a_ ( self) -> Tuple:
# fmt: off
snake_case_ = 'I was born in 92000, and this is falsé.'
snake_case_ = ['▁', '<unk>', '▁was', '▁born', '▁in', '▁9', '2000', '▁', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '▁', '.', ]
# fmt: on
snake_case_ = DebertaVaTokenizer(lowerCAmelCase__, do_lower_case=lowerCAmelCase__, split_by_punct=lowerCAmelCase__)
snake_case_ = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__))
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = DebertaVaTokenizerFast(lowerCAmelCase__, do_lower_case=lowerCAmelCase__, split_by_punct=lowerCAmelCase__)
snake_case_ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__))
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
def a_ ( self) -> Any:
# fmt: off
snake_case_ = ' \tHeLLo!how \n Are yoU? '
snake_case_ = ['▁', '<unk>', 'e', '<unk>', 'o', '!', 'how', '▁', '<unk>', 're', '▁yo', '<unk>', '?']
# fmt: on
snake_case_ = DebertaVaTokenizer(lowerCAmelCase__, do_lower_case=lowerCAmelCase__, split_by_punct=lowerCAmelCase__)
snake_case_ = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__))
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = DebertaVaTokenizerFast(lowerCAmelCase__, do_lower_case=lowerCAmelCase__, split_by_punct=lowerCAmelCase__)
snake_case_ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__))
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
def a_ ( self) -> Dict:
snake_case_ = self.get_tokenizer()
snake_case_ = self.get_rust_tokenizer()
snake_case_ = 'I was born in 92000, and this is falsé.'
snake_case_ = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__))
snake_case_ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__))
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__)
snake_case_ = rust_tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = self.get_rust_tokenizer()
snake_case_ = tokenizer.encode(lowerCAmelCase__)
snake_case_ = rust_tokenizer.encode(lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
def a_ ( self) -> int:
snake_case_ = 'This is a test'
snake_case_ = [13, 1, 4398, 25, 21, 1289]
snake_case_ = ['▁', 'T', 'his', '▁is', '▁a', '▁test']
snake_case_ = ['▁', '<unk>', 'his', '▁is', '▁a', '▁test']
snake_case_ = DebertaVaTokenizer(lowerCAmelCase__, keep_accents=lowerCAmelCase__)
snake_case_ = DebertaVaTokenizerFast(lowerCAmelCase__, keep_accents=lowerCAmelCase__)
snake_case_ = tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = tokenizer.tokenize(lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = tokenizer.convert_ids_to_tokens(lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = rust_tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = rust_tokenizer.tokenize(lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = rust_tokenizer.convert_ids_to_tokens(lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
# fmt: off
snake_case_ = 'I was born in 92000, and this is falsé.'
snake_case_ = [13, 1, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9]
snake_case_ = ['▁', 'I', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', 'é', '.', ]
snake_case_ = ['▁', '<unk>', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '.', ]
# fmt: on
snake_case_ = tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = tokenizer.tokenize(lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = tokenizer.convert_ids_to_tokens(lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = rust_tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = rust_tokenizer.tokenize(lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = rust_tokenizer.convert_ids_to_tokens(lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
def a_ ( self) -> Tuple:
snake_case_ = DebertaVaTokenizer(lowerCAmelCase__)
snake_case_ = tokenizer.encode('sequence builders')
snake_case_ = tokenizer.encode('multi-sequence build')
snake_case_ = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase__)
snake_case_ = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase__, lowerCAmelCase__)
self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id], lowerCAmelCase__)
self.assertEqual(
[tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id], lowerCAmelCase__, )
@slow
def a_ ( self) -> Union[str, Any]:
# fmt: off
snake_case_ = {'input_ids': [[1, 3_9867, 36, 1_9390, 486, 27, 3_5052, 8_1436, 18, 6_0685, 1225, 7, 3_5052, 8_1436, 18, 9367, 1_6899, 18, 1_5937, 53, 594, 773, 18, 1_6287, 3_0465, 36, 1_5937, 6, 4_1139, 38, 3_6979, 6_0763, 191, 6, 3_4132, 99, 6, 5_0538, 390, 4_3230, 6, 3_4132, 2779, 2_0850, 14, 699, 1072, 1194, 36, 382, 1_0901, 53, 7, 699, 1072, 2084, 36, 2_0422, 630, 53, 19, 105, 3049, 1896, 1053, 1_6899, 1506, 11, 3_7978, 4243, 7, 1237, 3_1869, 200, 1_6566, 654, 6, 3_5052, 8_1436, 7, 5_5630, 1_3593, 4, 2], [1, 26, 1_5011, 13, 667, 8, 1053, 18, 2_3611, 1237, 7_2356, 1_2820, 34, 10_4134, 1209, 35, 1_3313, 6627, 21, 202, 347, 7, 164, 2399, 11, 46, 4485, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 1232, 2864, 1_5785, 1_4951, 105, 5, 8581, 1250, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowerCAmelCase__, model_name='microsoft/deberta-v2-xlarge', revision='ad6e42c1532ddf3a15c39246b63f5559d558b670', )
| 312
| 0
|
"""simple docstring"""
import logging
import os
import threading
import time
try:
import warnings
except ImportError:
__UpperCamelCase = None
try:
import msvcrt
except ImportError:
__UpperCamelCase = None
try:
import fcntl
except ImportError:
__UpperCamelCase = None
# Backward compatibility
# ------------------------------------------------
try:
TimeoutError
except NameError:
__UpperCamelCase = OSError
# Data
# ------------------------------------------------
__UpperCamelCase = [
'''Timeout''',
'''BaseFileLock''',
'''WindowsFileLock''',
'''UnixFileLock''',
'''SoftFileLock''',
'''FileLock''',
]
__UpperCamelCase = '''3.0.12'''
__UpperCamelCase = None
def UpperCAmelCase ( ) -> int:
global _logger
snake_case_ = _logger or logging.getLogger(__name__ )
return _logger
class UpperCamelCase ( lowerCAmelCase__ ):
def __init__( self, lowerCAmelCase__) -> Dict:
snake_case_ = lock_file
return None
def __str__( self) -> Dict:
snake_case_ = f'The file lock \'{self.lock_file}\' could not be acquired.'
return temp
class UpperCamelCase :
def __init__( self, lowerCAmelCase__) -> List[Any]:
snake_case_ = lock
return None
def __enter__( self) -> Union[str, Any]:
return self.lock
def __exit__( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__) -> Tuple:
self.lock.release()
return None
class UpperCamelCase :
def __init__( self, lowerCAmelCase__, lowerCAmelCase__=-1, lowerCAmelCase__=None) -> List[str]:
snake_case_ = max_filename_length if max_filename_length is not None else 255
# Hash the filename if it's too long
snake_case_ = self.hash_filename_if_too_long(lowerCAmelCase__, lowerCAmelCase__)
# The path to the lock file.
snake_case_ = lock_file
# The file descriptor for the *_lock_file* as it is returned by the
# os.open() function.
# This file lock is only NOT None, if the object currently holds the
# lock.
snake_case_ = None
# The default timeout value.
snake_case_ = timeout
# We use this lock primarily for the lock counter.
snake_case_ = threading.Lock()
# The lock counter is used for implementing the nested locking
# mechanism. Whenever the lock is acquired, the counter is increased and
# the lock is only released, when this value is 0 again.
snake_case_ = 0
return None
@property
def a_ ( self) -> Optional[Any]:
return self._lock_file
@property
def a_ ( self) -> str:
return self._timeout
@timeout.setter
def a_ ( self, lowerCAmelCase__) -> Optional[Any]:
snake_case_ = float(lowerCAmelCase__)
return None
def a_ ( self) -> Dict:
raise NotImplementedError()
def a_ ( self) -> Tuple:
raise NotImplementedError()
@property
def a_ ( self) -> Optional[Any]:
return self._lock_file_fd is not None
def a_ ( self, lowerCAmelCase__=None, lowerCAmelCase__=0.05) -> str:
# Use the default timeout, if no timeout is provided.
if timeout is None:
snake_case_ = self.timeout
# Increment the number right at the beginning.
# We can still undo it, if something fails.
with self._thread_lock:
self._lock_counter += 1
snake_case_ = id(self)
snake_case_ = self._lock_file
snake_case_ = time.time()
try:
while True:
with self._thread_lock:
if not self.is_locked:
logger().debug(f'Attempting to acquire lock {lock_id} on {lock_filename}')
self._acquire()
if self.is_locked:
logger().debug(f'Lock {lock_id} acquired on {lock_filename}')
break
elif timeout >= 0 and time.time() - start_time > timeout:
logger().debug(f'Timeout on acquiring lock {lock_id} on {lock_filename}')
raise Timeout(self._lock_file)
else:
logger().debug(
f'Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ...')
time.sleep(lowerCAmelCase__)
except: # noqa
# Something did go wrong, so decrement the counter.
with self._thread_lock:
snake_case_ = max(0, self._lock_counter - 1)
raise
return _Acquire_ReturnProxy(lock=self)
def a_ ( self, lowerCAmelCase__=False) -> int:
with self._thread_lock:
if self.is_locked:
self._lock_counter -= 1
if self._lock_counter == 0 or force:
snake_case_ = id(self)
snake_case_ = self._lock_file
logger().debug(f'Attempting to release lock {lock_id} on {lock_filename}')
self._release()
snake_case_ = 0
logger().debug(f'Lock {lock_id} released on {lock_filename}')
return None
def __enter__( self) -> Union[str, Any]:
self.acquire()
return self
def __exit__( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__) -> Optional[Any]:
self.release()
return None
def __del__( self) -> Optional[int]:
self.release(force=lowerCAmelCase__)
return None
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__) -> str:
snake_case_ = os.path.basename(lowerCAmelCase__)
if len(lowerCAmelCase__) > max_length and max_length > 0:
snake_case_ = os.path.dirname(lowerCAmelCase__)
snake_case_ = str(hash(lowerCAmelCase__))
snake_case_ = filename[: max_length - len(lowerCAmelCase__) - 8] + '...' + hashed_filename + '.lock'
return os.path.join(lowerCAmelCase__, lowerCAmelCase__)
else:
return path
class UpperCamelCase ( lowerCAmelCase__ ):
def __init__( self, lowerCAmelCase__, lowerCAmelCase__=-1, lowerCAmelCase__=None) -> List[Any]:
from .file_utils import relative_to_absolute_path
super().__init__(lowerCAmelCase__, timeout=lowerCAmelCase__, max_filename_length=lowerCAmelCase__)
snake_case_ = '\\\\?\\' + relative_to_absolute_path(self.lock_file)
def a_ ( self) -> int:
snake_case_ = os.O_RDWR | os.O_CREAT | os.O_TRUNC
try:
snake_case_ = os.open(self._lock_file, lowerCAmelCase__)
except OSError:
pass
else:
try:
msvcrt.locking(lowerCAmelCase__, msvcrt.LK_NBLCK, 1)
except OSError:
os.close(lowerCAmelCase__)
else:
snake_case_ = fd
return None
def a_ ( self) -> Dict:
snake_case_ = self._lock_file_fd
snake_case_ = None
msvcrt.locking(lowerCAmelCase__, msvcrt.LK_UNLCK, 1)
os.close(lowerCAmelCase__)
try:
os.remove(self._lock_file)
# Probably another instance of the application
# that acquired the file lock.
except OSError:
pass
return None
class UpperCamelCase ( lowerCAmelCase__ ):
def __init__( self, lowerCAmelCase__, lowerCAmelCase__=-1, lowerCAmelCase__=None) -> Optional[Any]:
snake_case_ = os.statvfs(os.path.dirname(lowerCAmelCase__)).f_namemax
super().__init__(lowerCAmelCase__, timeout=lowerCAmelCase__, max_filename_length=lowerCAmelCase__)
def a_ ( self) -> str:
snake_case_ = os.O_RDWR | os.O_CREAT | os.O_TRUNC
snake_case_ = os.open(self._lock_file, lowerCAmelCase__)
try:
fcntl.flock(lowerCAmelCase__, fcntl.LOCK_EX | fcntl.LOCK_NB)
except OSError:
os.close(lowerCAmelCase__)
else:
snake_case_ = fd
return None
def a_ ( self) -> Tuple:
# Do not remove the lockfile:
#
# https://github.com/benediktschmitt/py-filelock/issues/31
# https://stackoverflow.com/questions/17708885/flock-removing-locked-file-without-race-condition
snake_case_ = self._lock_file_fd
snake_case_ = None
fcntl.flock(lowerCAmelCase__, fcntl.LOCK_UN)
os.close(lowerCAmelCase__)
return None
class UpperCamelCase ( lowerCAmelCase__ ):
def a_ ( self) -> str:
snake_case_ = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC
try:
snake_case_ = os.open(self._lock_file, lowerCAmelCase__)
except OSError:
pass
else:
snake_case_ = fd
return None
def a_ ( self) -> Tuple:
os.close(self._lock_file_fd)
snake_case_ = None
try:
os.remove(self._lock_file)
# The file is already deleted and that's what we want.
except OSError:
pass
return None
__UpperCamelCase = None
if msvcrt:
__UpperCamelCase = WindowsFileLock
elif fcntl:
__UpperCamelCase = UnixFileLock
else:
__UpperCamelCase = SoftFileLock
if warnings is not None:
warnings.warn('''only soft file lock is available''')
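# A minimal usage sketch of the FileLock alias chosen above: the context
# manager handles acquire/release, and nested use is re-entrant thanks to the
# lock counter.
#
#   lock = FileLock("resource.txt.lock", timeout=5)
#   with lock:
#       with lock:      # nested acquire only bumps the counter
#           pass        # the OS-level lock is still held here
#   # released only when the outermost context exits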
| 369
|
"""simple docstring"""
# flake8: noqa
# Lint as: python3
from typing import Dict, List, Optional, Type
from .. import config
from ..utils import logging
from .formatting import (
ArrowFormatter,
CustomFormatter,
Formatter,
PandasFormatter,
PythonFormatter,
TensorFormatter,
format_table,
query_table,
)
from .np_formatter import NumpyFormatter
__UpperCamelCase = logging.get_logger(__name__)
__UpperCamelCase = {}
__UpperCamelCase = {}
__UpperCamelCase = {}
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = None , ) -> Optional[Any]:
snake_case_ = aliases if aliases is not None else []
if format_type in _FORMAT_TYPES:
logger.warning(
f'Overwriting format type \'{format_type}\' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})' )
snake_case_ = formatter_cls
for alias in set(aliases + [format_type] ):
if alias in _FORMAT_TYPES_ALIASES:
logger.warning(
f'Overwriting format type alias \'{alias}\' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})' )
snake_case_ = format_type
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = None ) -> Union[str, Any]:
snake_case_ = aliases if aliases is not None else []
for alias in set(aliases + [format_type] ):
snake_case_ = unavailable_error
# Here we define all the available formatting functions that can be used by `Dataset.set_format`
_register_formatter(PythonFormatter, None, aliases=['''python'''])
_register_formatter(ArrowFormatter, '''arrow''', aliases=['''pa''', '''pyarrow'''])
_register_formatter(NumpyFormatter, '''numpy''', aliases=['''np'''])
_register_formatter(PandasFormatter, '''pandas''', aliases=['''pd'''])
_register_formatter(CustomFormatter, '''custom''')
if config.TORCH_AVAILABLE:
from .torch_formatter import TorchFormatter
_register_formatter(TorchFormatter, '''torch''', aliases=['''pt''', '''pytorch'''])
else:
__UpperCamelCase = ValueError('''PyTorch needs to be installed to be able to return PyTorch tensors.''')
_register_unavailable_formatter(_torch_error, '''torch''', aliases=['''pt''', '''pytorch'''])
if config.TF_AVAILABLE:
from .tf_formatter import TFFormatter
_register_formatter(TFFormatter, '''tensorflow''', aliases=['''tf'''])
else:
__UpperCamelCase = ValueError('''Tensorflow needs to be installed to be able to return Tensorflow tensors.''')
_register_unavailable_formatter(_tf_error, '''tensorflow''', aliases=['''tf'''])
if config.JAX_AVAILABLE:
from .jax_formatter import JaxFormatter
_register_formatter(JaxFormatter, '''jax''', aliases=[])
else:
__UpperCamelCase = ValueError('''JAX needs to be installed to be able to return JAX arrays.''')
_register_unavailable_formatter(_jax_error, '''jax''', aliases=[])
def UpperCAmelCase ( UpperCAmelCase ) -> Optional[str]:
if format_type in _FORMAT_TYPES_ALIASES:
return _FORMAT_TYPES_ALIASES[format_type]
else:
return format_type
def UpperCAmelCase ( UpperCAmelCase , **UpperCAmelCase ) -> Formatter:
snake_case_ = get_format_type_from_alias(UpperCAmelCase )
if format_type in _FORMAT_TYPES:
return _FORMAT_TYPES[format_type](**UpperCAmelCase )
if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE:
raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type]
else:
raise ValueError(
f'Return type should be None or selected in {list(type for type in _FORMAT_TYPES.keys() if type != None )}, but got \'{format_type}\'' )
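# A short sketch of how the registry above resolves lookups: aliases map to
# canonical format types first, and an unavailable backend raises the error
# object stored for it.
#
#   get_formatter("np")     # -> NumpyFormatter()
#   get_formatter("pt")     # -> TorchFormatter(), or raises the stored
#                           #    ValueError when PyTorch is not installed
#   get_formatter("bogus")  # -> raises ValueError listing registered types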
| 312
| 0
|
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCamelCase = logging.get_logger(__name__)
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase=False , UpperCAmelCase=False ) -> str:
snake_case_ = 'backbone.' if is_semantic else ''
snake_case_ = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'{prefix}blocks.{i}.norm1.weight', f'beit.encoder.layer.{i}.layernorm_before.weight') )
rename_keys.append((f'{prefix}blocks.{i}.norm1.bias', f'beit.encoder.layer.{i}.layernorm_before.bias') )
rename_keys.append(
(f'{prefix}blocks.{i}.attn.proj.weight', f'beit.encoder.layer.{i}.attention.output.dense.weight') )
rename_keys.append(
(f'{prefix}blocks.{i}.attn.proj.bias', f'beit.encoder.layer.{i}.attention.output.dense.bias') )
rename_keys.append((f'{prefix}blocks.{i}.norm2.weight', f'beit.encoder.layer.{i}.layernorm_after.weight') )
rename_keys.append((f'{prefix}blocks.{i}.norm2.bias', f'beit.encoder.layer.{i}.layernorm_after.bias') )
rename_keys.append((f'{prefix}blocks.{i}.mlp.fc1.weight', f'beit.encoder.layer.{i}.intermediate.dense.weight') )
rename_keys.append((f'{prefix}blocks.{i}.mlp.fc1.bias', f'beit.encoder.layer.{i}.intermediate.dense.bias') )
rename_keys.append((f'{prefix}blocks.{i}.mlp.fc2.weight', f'beit.encoder.layer.{i}.output.dense.weight') )
rename_keys.append((f'{prefix}blocks.{i}.mlp.fc2.bias', f'beit.encoder.layer.{i}.output.dense.bias') )
# projection layer + position embeddings
rename_keys.extend(
[
(f'{prefix}cls_token', 'beit.embeddings.cls_token'),
(f'{prefix}patch_embed.proj.weight', 'beit.embeddings.patch_embeddings.projection.weight'),
(f'{prefix}patch_embed.proj.bias', 'beit.embeddings.patch_embeddings.projection.bias'),
(f'{prefix}pos_embed', 'beit.embeddings.position_embeddings'),
] )
if has_lm_head:
# mask token + layernorm
rename_keys.extend(
[
('mask_token', 'beit.embeddings.mask_token'),
('norm.weight', 'layernorm.weight'),
('norm.bias', 'layernorm.bias'),
] )
else:
# layernorm + classification head
rename_keys.extend(
[
('fc_norm.weight', 'beit.pooler.layernorm.weight'),
('fc_norm.bias', 'beit.pooler.layernorm.bias'),
('head.weight', 'classifier.weight'),
('head.bias', 'classifier.bias'),
] )
return rename_keys
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=False , UpperCAmelCase=False ) -> int:
for i in range(config.num_hidden_layers ):
snake_case_ = 'backbone.' if is_semantic else ''
# queries, keys and values
snake_case_ = state_dict.pop(f'{prefix}blocks.{i}.attn.qkv.weight' )
snake_case_ = state_dict.pop(f'{prefix}blocks.{i}.attn.q_bias' )
snake_case_ = state_dict.pop(f'{prefix}blocks.{i}.attn.v_bias' )
snake_case_ = in_proj_weight[
: config.hidden_size, :
]
snake_case_ = q_bias
snake_case_ = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
snake_case_ = in_proj_weight[
-config.hidden_size :, :
]
snake_case_ = v_bias
# gamma_1 and gamma_2
# we call them lambda because otherwise they are renamed when using .from_pretrained
snake_case_ = state_dict.pop(f'{prefix}blocks.{i}.gamma_1' )
snake_case_ = state_dict.pop(f'{prefix}blocks.{i}.gamma_2' )
snake_case_ = gamma_a
snake_case_ = gamma_a
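# Note on the split above: BEiT stores a fused qkv weight of shape
# (3 * hidden_size, hidden_size) but only separate q and v biases (the key
# bias is fixed at zero), so the weight is sliced three ways while just
# q_bias and v_bias are copied over.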
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Any:
snake_case_ = dct.pop(UpperCAmelCase )
snake_case_ = val
def UpperCAmelCase ( ) -> Union[str, Any]:
snake_case_ = 'http://images.cocodataset.org/val2017/000000039769.jpg'
snake_case_ = Image.open(requests.get(UpperCAmelCase , stream=UpperCAmelCase ).raw )
return im
@torch.no_grad()
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=False ) -> List[str]:
snake_case_ = False if 'rvlcdip' in checkpoint_url else True
snake_case_ = BeitConfig(use_absolute_position_embeddings=UpperCAmelCase , use_mask_token=UpperCAmelCase )
# size of the architecture
if "large" in checkpoint_url or "dit-l" in checkpoint_url:
snake_case_ = 1024
snake_case_ = 4096
snake_case_ = 24
snake_case_ = 16
# labels
if "rvlcdip" in checkpoint_url:
snake_case_ = 16
snake_case_ = 'huggingface/label-files'
snake_case_ = 'rvlcdip-id2label.json'
snake_case_ = json.load(open(hf_hub_download(UpperCAmelCase , UpperCAmelCase , repo_type='dataset' ) , 'r' ) )
snake_case_ = {int(UpperCAmelCase ): v for k, v in idalabel.items()}
snake_case_ = idalabel
snake_case_ = {v: k for k, v in idalabel.items()}
# load state_dict of original model, remove and rename some keys
snake_case_ = torch.hub.load_state_dict_from_url(UpperCAmelCase , map_location='cpu' )['model']
snake_case_ = create_rename_keys(UpperCAmelCase , has_lm_head=UpperCAmelCase )
for src, dest in rename_keys:
rename_key(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
read_in_q_k_v(UpperCAmelCase , UpperCAmelCase , has_lm_head=UpperCAmelCase )
# load HuggingFace model
snake_case_ = BeitForMaskedImageModeling(UpperCAmelCase ) if has_lm_head else BeitForImageClassification(UpperCAmelCase )
model.eval()
model.load_state_dict(UpperCAmelCase )
# Check outputs on an image
snake_case_ = BeitImageProcessor(
size=config.image_size , resample=PILImageResampling.BILINEAR , do_center_crop=UpperCAmelCase )
snake_case_ = prepare_img()
snake_case_ = image_processor(images=UpperCAmelCase , return_tensors='pt' )
snake_case_ = encoding['pixel_values']
snake_case_ = model(UpperCAmelCase )
snake_case_ = outputs.logits
# verify logits
snake_case_ = [1, 16] if 'rvlcdip' in checkpoint_url else [1, 196, 8192]
assert logits.shape == torch.Size(UpperCAmelCase ), "Shape of logits not as expected"
Path(UpperCAmelCase ).mkdir(exist_ok=UpperCAmelCase )
print(f'Saving model to {pytorch_dump_folder_path}' )
model.save_pretrained(UpperCAmelCase )
print(f'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(UpperCAmelCase )
if push_to_hub:
if has_lm_head:
snake_case_ = 'dit-base' if 'base' in checkpoint_url else 'dit-large'
else:
snake_case_ = 'dit-base-finetuned-rvlcdip' if 'dit-b' in checkpoint_url else 'dit-large-finetuned-rvlcdip'
image_processor.push_to_hub(
repo_path_or_name=Path(UpperCAmelCase , UpperCAmelCase ) , organization='nielsr' , commit_message='Add image processor' , use_temp_dir=UpperCAmelCase , )
model.push_to_hub(
repo_path_or_name=Path(UpperCAmelCase , UpperCAmelCase ) , organization='nielsr' , commit_message='Add model' , use_temp_dir=UpperCAmelCase , )
if __name__ == "__main__":
__UpperCamelCase = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_url''',
default='''https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth''',
type=str,
help='''URL to the original PyTorch checkpoint (.pth file).''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
)
__UpperCamelCase = parser.parse_args()
convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 370
|
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mvp import MvpTokenizer
__UpperCamelCase = logging.get_logger(__name__)
__UpperCamelCase = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
# See all MVP models at https://huggingface.co/models?filter=mvp
__UpperCamelCase = {
'''vocab_file''': {
'''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/vocab.json''',
},
'''added_tokens.json''': {
'''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/added_tokens.json''',
},
'''merges_file''': {
'''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/tokenizer.json''',
},
}
__UpperCamelCase = {
'''RUCAIBox/mvp''': 1024,
}
class UpperCamelCase ( lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE_ = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE_ = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE_ = ["input_ids", "attention_mask"]
SCREAMING_SNAKE_CASE_ = MvpTokenizer
def __init__( self, lowerCAmelCase__=None, lowerCAmelCase__=None, lowerCAmelCase__=None, lowerCAmelCase__="replace", lowerCAmelCase__="<s>", lowerCAmelCase__="</s>", lowerCAmelCase__="</s>", lowerCAmelCase__="<s>", lowerCAmelCase__="<unk>", lowerCAmelCase__="<pad>", lowerCAmelCase__="<mask>", lowerCAmelCase__=False, lowerCAmelCase__=True, **lowerCAmelCase__, ) -> Union[str, Any]:
super().__init__(
lowerCAmelCase__, lowerCAmelCase__, tokenizer_file=lowerCAmelCase__, errors=lowerCAmelCase__, bos_token=lowerCAmelCase__, eos_token=lowerCAmelCase__, sep_token=lowerCAmelCase__, cls_token=lowerCAmelCase__, unk_token=lowerCAmelCase__, pad_token=lowerCAmelCase__, mask_token=lowerCAmelCase__, add_prefix_space=lowerCAmelCase__, trim_offsets=lowerCAmelCase__, **lowerCAmelCase__, )
snake_case_ = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
if pre_tok_state.get('add_prefix_space', lowerCAmelCase__) != add_prefix_space:
snake_case_ = getattr(lowerCAmelCase__, pre_tok_state.pop('type'))
snake_case_ = add_prefix_space
snake_case_ = pre_tok_class(**lowerCAmelCase__)
snake_case_ = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
snake_case_ = 'post_processor'
snake_case_ = getattr(self.backend_tokenizer, lowerCAmelCase__, lowerCAmelCase__)
if tokenizer_component_instance:
snake_case_ = json.loads(tokenizer_component_instance.__getstate__())
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
snake_case_ = tuple(state['sep'])
if "cls" in state:
snake_case_ = tuple(state['cls'])
snake_case_ = False
if state.get('add_prefix_space', lowerCAmelCase__) != add_prefix_space:
snake_case_ = add_prefix_space
snake_case_ = True
if state.get('trim_offsets', lowerCAmelCase__) != trim_offsets:
snake_case_ = trim_offsets
snake_case_ = True
if changes_to_apply:
snake_case_ = getattr(lowerCAmelCase__, state.pop('type'))
snake_case_ = component_class(**lowerCAmelCase__)
setattr(self.backend_tokenizer, lowerCAmelCase__, lowerCAmelCase__)
@property
def a_ ( self) -> str:
if self._mask_token is None:
if self.verbose:
logger.error('Using mask_token, but it is not set yet.')
return None
return str(self._mask_token)
@mask_token.setter
def a_ ( self, lowerCAmelCase__) -> Any:
snake_case_ = AddedToken(lowerCAmelCase__, lstrip=lowerCAmelCase__, rstrip=lowerCAmelCase__) if isinstance(lowerCAmelCase__, lowerCAmelCase__) else value
snake_case_ = value
def a_ ( self, *lowerCAmelCase__, **lowerCAmelCase__) -> BatchEncoding:
snake_case_ = kwargs.get('is_split_into_words', lowerCAmelCase__)
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
'to use it with pretokenized inputs.')
return super()._batch_encode_plus(*lowerCAmelCase__, **lowerCAmelCase__)
def a_ ( self, *lowerCAmelCase__, **lowerCAmelCase__) -> BatchEncoding:
snake_case_ = kwargs.get('is_split_into_words', lowerCAmelCase__)
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
'to use it with pretokenized inputs.')
return super()._encode_plus(*lowerCAmelCase__, **lowerCAmelCase__)
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None) -> Tuple[str]:
snake_case_ = self._tokenizer.model.save(lowerCAmelCase__, name=lowerCAmelCase__)
return tuple(lowerCAmelCase__)
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__=None) -> str:
snake_case_ = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None) -> List[int]:
snake_case_ = [self.sep_token_id]
snake_case_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
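# Illustrative special-token layout produced by the two methods above (their
# original names, build_inputs_with_special_tokens and
# create_token_type_ids_from_sequences, are collapsed to a_ in this dump):
#   single sequence: <s> A </s>
#   sequence pair:   <s> A </s> </s> B </s>
# and the token-type ids are all zeros in both cases.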
| 312
| 0
|
"""simple docstring"""
__UpperCamelCase = tuple[float, float, float]
__UpperCamelCase = tuple[float, float, float]
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> Vectorad:
snake_case_ = end_pointa[0] - end_pointa[0]
snake_case_ = end_pointa[1] - end_pointa[1]
snake_case_ = end_pointa[2] - end_pointa[2]
return (x, y, z)
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> Vectorad:
snake_case_ = ab[1] * ac[2] - ab[2] * ac[1] # *i
snake_case_ = (ab[0] * ac[2] - ab[2] * ac[0]) * -1 # *j
snake_case_ = ab[0] * ac[1] - ab[1] * ac[0] # *k
return (x, y, z)
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> bool:
return tuple(round(x , UpperCAmelCase ) for x in vector ) == (0, 0, 0)
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = 10 ) -> bool:
snake_case_ = create_vector(UpperCAmelCase , UpperCAmelCase )
snake_case_ = create_vector(UpperCAmelCase , UpperCAmelCase )
return is_zero_vector(get_ad_vectors_cross(UpperCAmelCase , UpperCAmelCase ) , UpperCAmelCase )
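# --- Added illustration (not part of the original file) ---
# The helpers above test whether three 3D points are collinear: the cross product
# AB x AC must be the zero vector (up to rounding). Clean, self-contained sketch:
def _sketch_are_collinear(a, b, c, accuracy=10):
    ab = tuple(q - p for p, q in zip(a, b))
    ac = tuple(q - p for p, q in zip(a, c))
    cross = (
        ab[1] * ac[2] - ab[2] * ac[1],
        ab[2] * ac[0] - ab[0] * ac[2],
        ab[0] * ac[1] - ab[1] * ac[0],
    )
    return tuple(round(x, accuracy) for x in cross) == (0, 0, 0)
assert _sketch_are_collinear((0, 0, 0), (1, 1, 1), (2, 2, 2))
assert not _sketch_are_collinear((0, 0, 0), (1, 0, 0), (0, 1, 0))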
| 371
|
"""simple docstring"""
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionImgaImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class UpperCamelCase ( lowerCAmelCase__ , unittest.TestCase ):
SCREAMING_SNAKE_CASE_ = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"
def a_ ( self, lowerCAmelCase__=0) -> List[Any]:
snake_case_ = floats_tensor((1, 3, 128, 128), rng=random.Random(lowerCAmelCase__))
snake_case_ = np.random.RandomState(lowerCAmelCase__)
snake_case_ = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'generator': generator,
'num_inference_steps': 3,
'strength': 0.75,
'guidance_scale': 7.5,
'output_type': 'numpy',
}
return inputs
def a_ ( self) -> Optional[Any]:
snake_case_ = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
pipe.set_progress_bar_config(disable=lowerCAmelCase__)
snake_case_ = self.get_dummy_inputs()
snake_case_ = pipe(**lowerCAmelCase__).images
snake_case_ = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 128, 128, 3)
snake_case_ = np.array([0.69643, 0.58484, 0.50314, 0.58760, 0.55368, 0.59643, 0.51529, 0.41217, 0.49087])
assert np.abs(image_slice - expected_slice).max() < 1e-1
def a_ ( self) -> List[str]:
snake_case_ = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
snake_case_ = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=lowerCAmelCase__)
pipe.set_progress_bar_config(disable=lowerCAmelCase__)
snake_case_ = self.get_dummy_inputs()
snake_case_ = pipe(**lowerCAmelCase__).images
snake_case_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
snake_case_ = np.array([0.61737, 0.54642, 0.53183, 0.54465, 0.52742, 0.60525, 0.49969, 0.40655, 0.48154])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
def a_ ( self) -> str:
snake_case_ = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
snake_case_ = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
pipe.set_progress_bar_config(disable=lowerCAmelCase__)
# warmup pass to apply optimizations
snake_case_ = pipe(**self.get_dummy_inputs())
snake_case_ = self.get_dummy_inputs()
snake_case_ = pipe(**lowerCAmelCase__).images
snake_case_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
snake_case_ = np.array([0.52761, 0.59977, 0.49033, 0.49619, 0.54282, 0.50311, 0.47600, 0.40918, 0.45203])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
def a_ ( self) -> int:
snake_case_ = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
snake_case_ = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
pipe.set_progress_bar_config(disable=lowerCAmelCase__)
snake_case_ = self.get_dummy_inputs()
snake_case_ = pipe(**lowerCAmelCase__).images
snake_case_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
snake_case_ = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
def a_ ( self) -> Dict:
snake_case_ = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
snake_case_ = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
pipe.set_progress_bar_config(disable=lowerCAmelCase__)
snake_case_ = self.get_dummy_inputs()
snake_case_ = pipe(**lowerCAmelCase__).images
snake_case_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
snake_case_ = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
def a_ ( self) -> Dict:
snake_case_ = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
snake_case_ = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe.set_progress_bar_config(disable=lowerCAmelCase__)
snake_case_ = self.get_dummy_inputs()
snake_case_ = pipe(**lowerCAmelCase__).images
snake_case_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
snake_case_ = np.array([0.65331, 0.58277, 0.48204, 0.56059, 0.53665, 0.56235, 0.50969, 0.40009, 0.46552])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class UpperCamelCase ( unittest.TestCase ):
@property
def a_ ( self) -> int:
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def a_ ( self) -> str:
snake_case_ = ort.SessionOptions()
snake_case_ = False
return options
def a_ ( self) -> Any:
snake_case_ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/img2img/sketch-mountains-input.jpg')
snake_case_ = init_image.resize((768, 512))
# using the PNDM scheduler by default
snake_case_ = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4', revision='onnx', safety_checker=lowerCAmelCase__, feature_extractor=lowerCAmelCase__, provider=self.gpu_provider, sess_options=self.gpu_options, )
pipe.set_progress_bar_config(disable=lowerCAmelCase__)
snake_case_ = 'A fantasy landscape, trending on artstation'
snake_case_ = np.random.RandomState(0)
snake_case_ = pipe(
prompt=lowerCAmelCase__, image=lowerCAmelCase__, strength=0.75, guidance_scale=7.5, num_inference_steps=10, generator=lowerCAmelCase__, output_type='np', )
snake_case_ = output.images
snake_case_ = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 768, 3)
snake_case_ = np.array([0.4909, 0.5059, 0.5372, 0.4623, 0.4876, 0.5049, 0.4820, 0.4956, 0.5019])
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
def a_ ( self) -> List[Any]:
snake_case_ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/img2img/sketch-mountains-input.jpg')
snake_case_ = init_image.resize((768, 512))
snake_case_ = LMSDiscreteScheduler.from_pretrained(
'runwayml/stable-diffusion-v1-5', subfolder='scheduler', revision='onnx')
snake_case_ = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5', revision='onnx', scheduler=lowerCAmelCase__, safety_checker=lowerCAmelCase__, feature_extractor=lowerCAmelCase__, provider=self.gpu_provider, sess_options=self.gpu_options, )
pipe.set_progress_bar_config(disable=lowerCAmelCase__)
snake_case_ = 'A fantasy landscape, trending on artstation'
snake_case_ = np.random.RandomState(0)
snake_case_ = pipe(
prompt=lowerCAmelCase__, image=lowerCAmelCase__, strength=0.75, guidance_scale=7.5, num_inference_steps=20, generator=lowerCAmelCase__, output_type='np', )
snake_case_ = output.images
snake_case_ = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 768, 3)
snake_case_ = np.array([0.8043, 0.926, 0.9581, 0.8119, 0.8954, 0.913, 0.7209, 0.7463, 0.7431])
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
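# --- Added illustration (not part of the original file) ---
# Every test above compares a small corner slice of the generated image against
# hard-coded reference values with a loose tolerance, because ONNX Runtime output
# is not bit-reproducible across builds. The comparison pattern in isolation:
import numpy as _np
def _sketch_close_enough(image_slice, expected_slice, tol=1e-1):
    diff = _np.abs(_np.asarray(image_slice).flatten() - _np.asarray(expected_slice).flatten())
    return float(diff.max()) < tol
assert _sketch_close_enough(_np.zeros(9), _np.full(9, 0.05))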
| 312
| 0
|
import mpmath # for roots of unity
import numpy as np
class UpperCamelCase :
def __init__( self, lowerCAmelCase__=None, lowerCAmelCase__=None) -> List[str]:
# Input as list
snake_case_ = list(poly_a or [0])[:]
snake_case_ = list(poly_b or [0])[:]
# Remove leading zero coefficients
while self.polyA[-1] == 0:
self.polyA.pop()
snake_case_ = len(self.polyA)
while self.polyB[-1] == 0:
self.polyB.pop()
snake_case_ = len(self.polyB)
# Add 0 to make lengths equal a power of 2
snake_case_ = int(
2 ** np.ceil(np.loga(len(self.polyA) + len(self.polyB) - 1)))
while len(self.polyA) < self.c_max_length:
self.polyA.append(0)
while len(self.polyB) < self.c_max_length:
self.polyB.append(0)
# A complex root used for the fourier transform
snake_case_ = complex(mpmath.root(x=1, n=self.c_max_length, k=1))
# The product
snake_case_ = self.__multiply()
def a_ ( self, lowerCAmelCase__) -> Union[str, Any]:
snake_case_ = [[x] for x in self.polyA] if which == 'A' else [[x] for x in self.polyB]
# Corner case
if len(lowerCAmelCase__) <= 1:
return dft[0]
# Iteratively combine columns, halving their number at each butterfly stage
snake_case_ = self.c_max_length // 2
while next_ncol > 0:
snake_case_ = [[] for i in range(lowerCAmelCase__)]
snake_case_ = self.root**next_ncol
# First half of next step
snake_case_ = 1
for j in range(self.c_max_length // (next_ncol * 2)):
for i in range(lowerCAmelCase__):
new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j])
current_root *= root
# Second half of next step
snake_case_ = 1
for j in range(self.c_max_length // (next_ncol * 2)):
for i in range(lowerCAmelCase__):
new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j])
current_root *= root
# Update
snake_case_ = new_dft
snake_case_ = next_ncol // 2
return dft[0]
def a_ ( self) -> Union[str, Any]:
snake_case_ = self.__dft('A')
snake_case_ = self.__dft('B')
snake_case_ = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length)]]
del dft_a
del dft_b
# Corner Case
if len(inverce_c[0]) <= 1:
return inverce_c[0]
# Inverse DFT
snake_case_ = 2
while next_ncol <= self.c_max_length:
snake_case_ = [[] for i in range(lowerCAmelCase__)]
snake_case_ = self.root ** (next_ncol // 2)
snake_case_ = 1
# First half of next step
for j in range(self.c_max_length // next_ncol):
for i in range(next_ncol // 2):
# Even positions
new_inverse_c[i].append(
(
inverce_c[i][j]
+ inverce_c[i][j + self.c_max_length // next_ncol]
)
/ 2)
# Odd positions
new_inverse_c[i + next_ncol // 2].append(
(
inverce_c[i][j]
- inverce_c[i][j + self.c_max_length // next_ncol]
)
/ (2 * current_root))
current_root *= root
# Update
snake_case_ = new_inverse_c
next_ncol *= 2
# Unpack
snake_case_ = [round(x[0].real, 8) + round(x[0].imag, 8) * 1J for x in inverce_c]
# Remove leading 0's
while inverce_c[-1] == 0:
inverce_c.pop()
return inverce_c
def __str__( self) -> Tuple:
snake_case_ = 'A = ' + ' + '.join(
f'{coef}*x^{i}' for i, coef in enumerate(self.polyA[: self.len_A]))
snake_case_ = 'B = ' + ' + '.join(
f'{coef}*x^{i}' for i, coef in enumerate(self.polyB[: self.len_B]))
snake_case_ = 'A*B = ' + ' + '.join(
f'{coef}*x^{i}' for i, coef in enumerate(self.product))
return f'{a}\n{b}\n{c}'
# Unit tests
if __name__ == "__main__":
import doctest
doctest.testmod()
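# --- Added cross-check (not part of the original file) ---
# The class above multiplies polynomials via a hand-rolled FFT; numpy.fft yields
# the same coefficients directly. Sketch, assuming coefficients are ordered lowest
# degree first:
import numpy as _np
def _sketch_poly_multiply(poly_a, poly_b):
    n = len(poly_a) + len(poly_b) - 1  # degree of the product + 1
    fa = _np.fft.fft(poly_a, n)
    fb = _np.fft.fft(poly_b, n)
    return _np.round(_np.fft.ifft(fa * fb).real, 8).tolist()
# (1 + 2x) * (3 + 4x) = 3 + 10x + 8x^2
assert _sketch_poly_multiply([1, 2], [3, 4]) == [3.0, 10.0, 8.0]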
| 350
|
"""simple docstring"""
import io
import math
from typing import Dict, Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import convert_to_rgb, normalize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
get_image_size,
infer_channel_dimension_format,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_vision_available, logging
from ...utils.import_utils import requires_backends
if is_vision_available():
import textwrap
from PIL import Image, ImageDraw, ImageFont
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
__UpperCamelCase = False
__UpperCamelCase = logging.get_logger(__name__)
__UpperCamelCase = '''ybelkada/fonts'''
def UpperCAmelCase ( ) -> Dict:
if is_torch_available() and not is_torch_greater_or_equal_than_1_11:
raise ImportError(
f'You are using torch=={torch.__version__}, but torch>=1.11.0 is required to use '
'Pix2StructImageProcessor. Please upgrade torch.' )
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> str:
requires_backends(UpperCAmelCase , ['torch'] )
_check_torch_version()
snake_case_ = image_tensor.unsqueeze(0 )
snake_case_ = torch.nn.functional.unfold(UpperCAmelCase , (patch_height, patch_width) , stride=(patch_height, patch_width) )
snake_case_ = patches.reshape(image_tensor.size(0 ) , image_tensor.size(1 ) , UpperCAmelCase , UpperCAmelCase , -1 )
snake_case_ = patches.permute(0 , 4 , 2 , 3 , 1 ).reshape(
image_tensor.size(2 ) // patch_height , image_tensor.size(3 ) // patch_width , image_tensor.size(1 ) * patch_height * patch_width , )
return patches.unsqueeze(0 )
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase = 36 , UpperCAmelCase = "black" , UpperCAmelCase = "white" , UpperCAmelCase = 5 , UpperCAmelCase = 5 , UpperCAmelCase = 5 , UpperCAmelCase = 5 , UpperCAmelCase = None , UpperCAmelCase = None , ) -> Image.Image:
requires_backends(UpperCAmelCase , 'vision' )
# Add new lines so that each line is no more than 80 characters.
snake_case_ = textwrap.TextWrapper(width=80 )
snake_case_ = wrapper.wrap(text=UpperCAmelCase )
snake_case_ = '\n'.join(UpperCAmelCase )
if font_bytes is not None and font_path is None:
snake_case_ = io.BytesIO(UpperCAmelCase )
elif font_path is not None:
snake_case_ = font_path
else:
snake_case_ = hf_hub_download(UpperCAmelCase , 'Arial.TTF' )
snake_case_ = ImageFont.truetype(UpperCAmelCase , encoding='UTF-8' , size=UpperCAmelCase )
# Use a temporary canvas to determine the width and height in pixels when
# rendering the text.
snake_case_ = ImageDraw.Draw(Image.new('RGB' , (1, 1) , UpperCAmelCase ) )
snake_case_ , snake_case_ , snake_case_ , snake_case_ = temp_draw.textbbox((0, 0) , UpperCAmelCase , UpperCAmelCase )
# Create the actual image with a bit of padding around the text.
snake_case_ = text_width + left_padding + right_padding
snake_case_ = text_height + top_padding + bottom_padding
snake_case_ = Image.new('RGB' , (image_width, image_height) , UpperCAmelCase )
snake_case_ = ImageDraw.Draw(UpperCAmelCase )
draw.text(xy=(left_padding, top_padding) , text=UpperCAmelCase , fill=UpperCAmelCase , font=UpperCAmelCase )
return image
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ) -> Any:
requires_backends(UpperCAmelCase , 'vision' )
# Convert to PIL image if necessary
snake_case_ = to_pil_image(UpperCAmelCase )
snake_case_ = render_text(UpperCAmelCase , **UpperCAmelCase )
snake_case_ = max(header_image.width , image.width )
snake_case_ = int(image.height * (new_width / image.width) )
snake_case_ = int(header_image.height * (new_width / header_image.width) )
snake_case_ = Image.new('RGB' , (new_width, new_height + new_header_height) , 'white' )
new_image.paste(header_image.resize((new_width, new_header_height) ) , (0, 0) )
new_image.paste(image.resize((new_width, new_height) ) , (0, new_header_height) )
# Convert back to the original framework if necessary
snake_case_ = to_numpy_array(UpperCAmelCase )
if infer_channel_dimension_format(UpperCAmelCase ) == ChannelDimension.LAST:
snake_case_ = to_channel_dimension_format(UpperCAmelCase , ChannelDimension.LAST )
return new_image
class UpperCamelCase ( lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE_ = ["flattened_patches"]
def __init__( self, lowerCAmelCase__ = True, lowerCAmelCase__ = True, lowerCAmelCase__ = None, lowerCAmelCase__ = 2048, lowerCAmelCase__ = False, **lowerCAmelCase__, ) -> None:
super().__init__(**lowerCAmelCase__)
snake_case_ = patch_size if patch_size is not None else {'height': 16, 'width': 16}
snake_case_ = do_normalize
snake_case_ = do_convert_rgb
snake_case_ = max_patches
snake_case_ = is_vqa
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, **lowerCAmelCase__) -> np.ndarray:
requires_backends(self.extract_flattened_patches, 'torch')
_check_torch_version()
# convert to torch
snake_case_ = to_channel_dimension_format(lowerCAmelCase__, ChannelDimension.FIRST)
snake_case_ = torch.from_numpy(lowerCAmelCase__)
snake_case_ , snake_case_ = patch_size['height'], patch_size['width']
snake_case_ , snake_case_ = get_image_size(lowerCAmelCase__)
# maximize scale s.t. rows * columns <= max_patches
snake_case_ = math.sqrt(max_patches * (patch_height / image_height) * (patch_width / image_width))
snake_case_ = max(min(math.floor(scale * image_height / patch_height), lowerCAmelCase__), 1)
snake_case_ = max(min(math.floor(scale * image_width / patch_width), lowerCAmelCase__), 1)
snake_case_ = max(num_feasible_rows * patch_height, 1)
snake_case_ = max(num_feasible_cols * patch_width, 1)
snake_case_ = torch.nn.functional.interpolate(
image.unsqueeze(0), size=(resized_height, resized_width), mode='bilinear', align_corners=lowerCAmelCase__, antialias=lowerCAmelCase__, ).squeeze(0)
# [1, rows, columns, patch_height * patch_width * image_channels]
snake_case_ = torch_extract_patches(lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = patches.shape
snake_case_ = patches_shape[1]
snake_case_ = patches_shape[2]
snake_case_ = patches_shape[3]
# [rows * columns, patch_height * patch_width * image_channels]
snake_case_ = patches.reshape([rows * columns, depth])
# [rows * columns, 1]
snake_case_ = torch.arange(lowerCAmelCase__).reshape([rows, 1]).repeat(1, lowerCAmelCase__).reshape([rows * columns, 1])
snake_case_ = torch.arange(lowerCAmelCase__).reshape([1, columns]).repeat(lowerCAmelCase__, 1).reshape([rows * columns, 1])
# Offset by 1 so the ids do not contain zeros, which represent padding.
row_ids += 1
col_ids += 1
# Prepare additional patch features.
# [rows * columns, 1]
snake_case_ = row_ids.to(torch.floataa)
snake_case_ = col_ids.to(torch.floataa)
# [rows * columns, 2 + patch_height * patch_width * image_channels]
snake_case_ = torch.cat([row_ids, col_ids, patches], -1)
# [max_patches, 2 + patch_height * patch_width * image_channels]
snake_case_ = torch.nn.functional.pad(lowerCAmelCase__, [0, 0, 0, max_patches - (rows * columns)]).float()
snake_case_ = to_numpy_array(lowerCAmelCase__)
return result
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None, **lowerCAmelCase__) -> np.ndarray:
if image.dtype == np.uinta:
snake_case_ = image.astype(np.floataa)
# take mean across the whole `image`
snake_case_ = np.mean(lowerCAmelCase__)
snake_case_ = np.std(lowerCAmelCase__)
snake_case_ = max(lowerCAmelCase__, 1.0 / math.sqrt(np.prod(image.shape)))
return normalize(lowerCAmelCase__, mean=lowerCAmelCase__, std=lowerCAmelCase__, **lowerCAmelCase__)
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__ = ChannelDimension.FIRST, **lowerCAmelCase__, ) -> ImageInput:
snake_case_ = do_normalize if do_normalize is not None else self.do_normalize
snake_case_ = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
snake_case_ = patch_size if patch_size is not None else self.patch_size
snake_case_ = max_patches if max_patches is not None else self.max_patches
snake_case_ = self.is_vqa
if kwargs.get('data_format', lowerCAmelCase__) is not None:
raise ValueError('data_format is not an accepted input as the outputs are always in the channels-last format.')
snake_case_ = make_list_of_images(lowerCAmelCase__)
if not valid_images(lowerCAmelCase__):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.')
# PIL RGBA images are converted to RGB
if do_convert_rgb:
snake_case_ = [convert_to_rgb(lowerCAmelCase__) for image in images]
# All transformations expect numpy arrays.
snake_case_ = [to_numpy_array(lowerCAmelCase__) for image in images]
if is_vqa:
if header_text is None:
raise ValueError('A header text must be provided for VQA models.')
snake_case_ = kwargs.pop('font_bytes', lowerCAmelCase__)
snake_case_ = kwargs.pop('font_path', lowerCAmelCase__)
if isinstance(lowerCAmelCase__, lowerCAmelCase__):
snake_case_ = [header_text] * len(lowerCAmelCase__)
snake_case_ = [
render_header(lowerCAmelCase__, header_text[i], font_bytes=lowerCAmelCase__, font_path=lowerCAmelCase__)
for i, image in enumerate(lowerCAmelCase__)
]
if do_normalize:
snake_case_ = [self.normalize(image=lowerCAmelCase__) for image in images]
# convert to torch tensor and permute
snake_case_ = [
self.extract_flattened_patches(image=lowerCAmelCase__, max_patches=lowerCAmelCase__, patch_size=lowerCAmelCase__)
for image in images
]
# create attention mask in numpy
snake_case_ = [(image.sum(axis=-1) != 0).astype(np.floataa) for image in images]
snake_case_ = BatchFeature(
data={'flattened_patches': images, 'attention_mask': attention_masks}, tensor_type=lowerCAmelCase__)
return encoded_outputs
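# --- Added illustration (not part of the original file) ---
# `extract_flattened_patches` above rescales the image so the patch grid holds at
# most `max_patches` patches while roughly preserving aspect ratio. The sizing
# math in isolation:
import math as _math
def _sketch_patch_grid(image_height, image_width, patch_height, patch_width, max_patches):
    scale = _math.sqrt(max_patches * (patch_height / image_height) * (patch_width / image_width))
    rows = max(min(_math.floor(scale * image_height / patch_height), max_patches), 1)
    cols = max(min(_math.floor(scale * image_width / patch_width), max_patches), 1)
    return rows, cols
rows, cols = _sketch_patch_grid(480, 640, 16, 16, 2048)
assert rows * cols <= 2048  # 39 * 52 = 2028 patches for this input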
| 312
| 0
|
"""simple docstring"""
from collections import deque
from math import floor
from random import random
from time import time
class UpperCamelCase :
def __init__( self) -> Dict:
snake_case_ = {}
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__=1) -> Any:
if self.graph.get(lowerCAmelCase__):
if self.graph[u].count([w, v]) == 0:
self.graph[u].append([w, v])
else:
snake_case_ = [[w, v]]
if not self.graph.get(lowerCAmelCase__):
snake_case_ = []
def a_ ( self) -> Any:
return list(self.graph)
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__) -> Any:
if self.graph.get(lowerCAmelCase__):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(lowerCAmelCase__)
def a_ ( self, lowerCAmelCase__=-2, lowerCAmelCase__=-1) -> Optional[Any]:
if s == d:
return []
snake_case_ = []
snake_case_ = []
if s == -2:
snake_case_ = list(self.graph)[0]
stack.append(lowerCAmelCase__)
visited.append(lowerCAmelCase__)
snake_case_ = s
while True:
# check if there are any non-isolated nodes
if len(self.graph[s]) != 0:
snake_case_ = s
for node in self.graph[s]:
if visited.count(node[1]) < 1:
if node[1] == d:
visited.append(lowerCAmelCase__)
return visited
else:
stack.append(node[1])
visited.append(node[1])
snake_case_ = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(lowerCAmelCase__) != 0:
snake_case_ = stack[len(lowerCAmelCase__) - 1]
else:
snake_case_ = ss
# check if we have reached the starting point
if len(lowerCAmelCase__) == 0:
return visited
def a_ ( self, lowerCAmelCase__=-1) -> Tuple:
if c == -1:
snake_case_ = floor(random() * 1_0000) + 10
for i in range(lowerCAmelCase__):
# every vertex gets a random number of edges (up to ~100)
for _ in range(floor(random() * 102) + 1):
snake_case_ = floor(random() * c) + 1
if n != i:
self.add_pair(lowerCAmelCase__, lowerCAmelCase__, 1)
def a_ ( self, lowerCAmelCase__=-2) -> Tuple:
snake_case_ = deque()
snake_case_ = []
if s == -2:
snake_case_ = list(self.graph)[0]
d.append(lowerCAmelCase__)
visited.append(lowerCAmelCase__)
while d:
snake_case_ = d.popleft()
if len(self.graph[s]) != 0:
for node in self.graph[s]:
if visited.count(node[1]) < 1:
d.append(node[1])
visited.append(node[1])
return visited
def a_ ( self, lowerCAmelCase__) -> Tuple:
snake_case_ = 0
for x in self.graph:
for y in self.graph[x]:
if y[1] == u:
count += 1
return count
def a_ ( self, lowerCAmelCase__) -> Dict:
return len(self.graph[u])
def a_ ( self, lowerCAmelCase__=-2) -> Any:
snake_case_ = []
snake_case_ = []
if s == -2:
snake_case_ = list(self.graph)[0]
stack.append(lowerCAmelCase__)
visited.append(lowerCAmelCase__)
snake_case_ = s
snake_case_ = []
while True:
# check if there are any non-isolated nodes
if len(self.graph[s]) != 0:
snake_case_ = s
for node in self.graph[s]:
if visited.count(node[1]) < 1:
stack.append(node[1])
visited.append(node[1])
snake_case_ = node[1]
break
# check if all the children are visited
if s == ss:
sorted_nodes.append(stack.pop())
if len(lowerCAmelCase__) != 0:
snake_case_ = stack[len(lowerCAmelCase__) - 1]
else:
snake_case_ = ss
# check if we have reached the starting point
if len(lowerCAmelCase__) == 0:
return sorted_nodes
def a_ ( self) -> List[Any]:
snake_case_ = []
snake_case_ = []
snake_case_ = list(self.graph)[0]
stack.append(lowerCAmelCase__)
visited.append(lowerCAmelCase__)
snake_case_ = -2
snake_case_ = []
snake_case_ = s
snake_case_ = False
snake_case_ = set()
while True:
# check if there are any non-isolated nodes
if len(self.graph[s]) != 0:
snake_case_ = s
for node in self.graph[s]:
if (
visited.count(node[1]) > 0
and node[1] != parent
and indirect_parents.count(node[1]) > 0
and not on_the_way_back
):
snake_case_ = len(lowerCAmelCase__) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1])
break
else:
anticipating_nodes.add(stack[len_stack])
len_stack -= 1
if visited.count(node[1]) < 1:
stack.append(node[1])
visited.append(node[1])
snake_case_ = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
snake_case_ = True
if len(lowerCAmelCase__) != 0:
snake_case_ = stack[len(lowerCAmelCase__) - 1]
else:
snake_case_ = False
indirect_parents.append(lowerCAmelCase__)
snake_case_ = s
snake_case_ = ss
# check if we have reached the starting point
if len(lowerCAmelCase__) == 0:
return list(lowerCAmelCase__)
def a_ ( self) -> str:
snake_case_ = []
snake_case_ = []
snake_case_ = list(self.graph)[0]
stack.append(lowerCAmelCase__)
visited.append(lowerCAmelCase__)
snake_case_ = -2
snake_case_ = []
snake_case_ = s
snake_case_ = False
snake_case_ = set()
while True:
# check if there are any non-isolated nodes
if len(self.graph[s]) != 0:
snake_case_ = s
for node in self.graph[s]:
if (
visited.count(node[1]) > 0
and node[1] != parent
and indirect_parents.count(node[1]) > 0
and not on_the_way_back
):
snake_case_ = len(lowerCAmelCase__) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1])
break
else:
return True
if visited.count(node[1]) < 1:
stack.append(node[1])
visited.append(node[1])
snake_case_ = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
snake_case_ = True
if len(lowerCAmelCase__) != 0:
snake_case_ = stack[len(lowerCAmelCase__) - 1]
else:
snake_case_ = False
indirect_parents.append(lowerCAmelCase__)
snake_case_ = s
snake_case_ = ss
# check if we have reached the starting point
if len(lowerCAmelCase__) == 0:
return False
def a_ ( self, lowerCAmelCase__=-2, lowerCAmelCase__=-1) -> int:
snake_case_ = time()
self.dfs(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = time()
return end - begin
def a_ ( self, lowerCAmelCase__=-2) -> Tuple:
snake_case_ = time()
self.bfs(lowerCAmelCase__)
snake_case_ = time()
return end - begin
class UpperCamelCase :
def __init__( self) -> str:
snake_case_ = {}
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__=1) -> Union[str, Any]:
# check if u already exists in the graph
if self.graph.get(lowerCAmelCase__):
# if there already is an edge
if self.graph[u].count([w, v]) == 0:
self.graph[u].append([w, v])
else:
# if u does not exist
snake_case_ = [[w, v]]
# add the other way
if self.graph.get(lowerCAmelCase__):
# if there already is an edge
if self.graph[v].count([w, u]) == 0:
self.graph[v].append([w, u])
else:
# if v does not exist
snake_case_ = [[w, u]]
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__) -> str:
if self.graph.get(lowerCAmelCase__):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(lowerCAmelCase__)
# the other way round
if self.graph.get(lowerCAmelCase__):
for _ in self.graph[v]:
if _[1] == u:
self.graph[v].remove(lowerCAmelCase__)
def a_ ( self, lowerCAmelCase__=-2, lowerCAmelCase__=-1) -> Optional[Any]:
if s == d:
return []
snake_case_ = []
snake_case_ = []
if s == -2:
snake_case_ = list(self.graph)[0]
stack.append(lowerCAmelCase__)
visited.append(lowerCAmelCase__)
snake_case_ = s
while True:
# check if there are any non-isolated nodes
if len(self.graph[s]) != 0:
snake_case_ = s
for node in self.graph[s]:
if visited.count(node[1]) < 1:
if node[1] == d:
visited.append(lowerCAmelCase__)
return visited
else:
stack.append(node[1])
visited.append(node[1])
snake_case_ = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(lowerCAmelCase__) != 0:
snake_case_ = stack[len(lowerCAmelCase__) - 1]
else:
snake_case_ = ss
# check if we have reached the starting point
if len(lowerCAmelCase__) == 0:
return visited
def a_ ( self, lowerCAmelCase__=-1) -> Optional[Any]:
if c == -1:
snake_case_ = floor(random() * 1_0000) + 10
for i in range(lowerCAmelCase__):
# every vertex gets a random number of edges (up to ~100)
for _ in range(floor(random() * 102) + 1):
snake_case_ = floor(random() * c) + 1
if n != i:
self.add_pair(lowerCAmelCase__, lowerCAmelCase__, 1)
def a_ ( self, lowerCAmelCase__=-2) -> Dict:
snake_case_ = deque()
snake_case_ = []
if s == -2:
snake_case_ = list(self.graph)[0]
d.append(lowerCAmelCase__)
visited.append(lowerCAmelCase__)
while d:
snake_case_ = d.popleft()
if len(self.graph[s]) != 0:
for node in self.graph[s]:
if visited.count(node[1]) < 1:
d.append(node[1])
visited.append(node[1])
return visited
def a_ ( self, lowerCAmelCase__) -> Tuple:
return len(self.graph[u])
def a_ ( self) -> Tuple:
snake_case_ = []
snake_case_ = []
snake_case_ = list(self.graph)[0]
stack.append(lowerCAmelCase__)
visited.append(lowerCAmelCase__)
snake_case_ = -2
snake_case_ = []
snake_case_ = s
snake_case_ = False
snake_case_ = set()
while True:
# check if there are any non-isolated nodes
if len(self.graph[s]) != 0:
snake_case_ = s
for node in self.graph[s]:
if (
visited.count(node[1]) > 0
and node[1] != parent
and indirect_parents.count(node[1]) > 0
and not on_the_way_back
):
snake_case_ = len(lowerCAmelCase__) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1])
break
else:
anticipating_nodes.add(stack[len_stack])
len_stack -= 1
if visited.count(node[1]) < 1:
stack.append(node[1])
visited.append(node[1])
snake_case_ = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
snake_case_ = True
if len(lowerCAmelCase__) != 0:
snake_case_ = stack[len(lowerCAmelCase__) - 1]
else:
snake_case_ = False
indirect_parents.append(lowerCAmelCase__)
snake_case_ = s
snake_case_ = ss
# check if we have reached the starting point
if len(lowerCAmelCase__) == 0:
return list(lowerCAmelCase__)
def a_ ( self) -> str:
snake_case_ = []
snake_case_ = []
snake_case_ = list(self.graph)[0]
stack.append(lowerCAmelCase__)
visited.append(lowerCAmelCase__)
snake_case_ = -2
snake_case_ = []
snake_case_ = s
snake_case_ = False
snake_case_ = set()
while True:
# check if there are any non-isolated nodes
if len(self.graph[s]) != 0:
snake_case_ = s
for node in self.graph[s]:
if (
visited.count(node[1]) > 0
and node[1] != parent
and indirect_parents.count(node[1]) > 0
and not on_the_way_back
):
snake_case_ = len(lowerCAmelCase__) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1])
break
else:
return True
if visited.count(node[1]) < 1:
stack.append(node[1])
visited.append(node[1])
snake_case_ = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
snake_case_ = True
if len(lowerCAmelCase__) != 0:
snake_case_ = stack[len(lowerCAmelCase__) - 1]
else:
snake_case_ = False
indirect_parents.append(lowerCAmelCase__)
snake_case_ = s
snake_case_ = ss
# check if we have reached the starting point
if len(lowerCAmelCase__) == 0:
return False
def a_ ( self) -> int:
return list(self.graph)
def a_ ( self, lowerCAmelCase__=-2, lowerCAmelCase__=-1) -> int:
snake_case_ = time()
self.dfs(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = time()
return end - begin
def a_ ( self, lowerCAmelCase__=-2) -> str:
snake_case_ = time()
self.bfs(lowerCAmelCase__)
snake_case_ = time()
return end - begin
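# --- Added companion sketch (not part of the original file) ---
# The cycle-detection methods above walk the graph iteratively with an explicit
# stack. The same idea in compact recursive form, independent of the classes above:
def _sketch_has_cycle(adjacency):
    visited, in_stack = set(), set()
    def dfs(node):
        visited.add(node)
        in_stack.add(node)
        for nxt in adjacency.get(node, []):
            if nxt in in_stack:  # back edge -> cycle
                return True
            if nxt not in visited and dfs(nxt):
                return True
        in_stack.discard(node)
        return False
    return any(node not in visited and dfs(node) for node in adjacency)
assert _sketch_has_cycle({1: [2], 2: [3], 3: [1]})
assert not _sketch_has_cycle({1: [2], 2: [3], 3: []})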
| 351
|
"""simple docstring"""
from math import pi
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> float:
return 2 * pi * radius * (angle / 360)
if __name__ == "__main__":
print(arc_length(90, 10))
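# Added worked example (assuming the function keeps its original name `arc_length`,
# as the call above does): a 90-degree arc on a circle of radius 10 spans a quarter
# of the circumference, 2 * pi * 10 * (90 / 360) = 5 * pi ~= 15.708.
assert abs(arc_length(90, 10) - 5 * pi) < 1e-9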
| 312
| 0
|
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import (
TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaubertConfig,
TFFlaubertForMultipleChoice,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForSequenceClassification,
TFFlaubertForTokenClassification,
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
)
class UpperCamelCase :
def __init__( self, lowerCAmelCase__, ) -> Any:
snake_case_ = parent
snake_case_ = 13
snake_case_ = 7
snake_case_ = True
snake_case_ = True
snake_case_ = True
snake_case_ = True
snake_case_ = True
snake_case_ = False
snake_case_ = False
snake_case_ = False
snake_case_ = 2
snake_case_ = 99
snake_case_ = 0
snake_case_ = 32
snake_case_ = 2
snake_case_ = 4
snake_case_ = 0.1
snake_case_ = 0.1
snake_case_ = 512
snake_case_ = 16
snake_case_ = 2
snake_case_ = 0.02
snake_case_ = 3
snake_case_ = 4
snake_case_ = 'last'
snake_case_ = True
snake_case_ = None
snake_case_ = 0
def a_ ( self) -> Tuple:
snake_case_ = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
snake_case_ = random_attention_mask([self.batch_size, self.seq_length], dtype=tf.floataa)
snake_case_ = None
if self.use_input_lengths:
snake_case_ = (
ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
) # small variation of seq_length
snake_case_ = None
if self.use_token_type_ids:
snake_case_ = ids_tensor([self.batch_size, self.seq_length], self.n_langs)
snake_case_ = None
snake_case_ = None
snake_case_ = None
if self.use_labels:
snake_case_ = ids_tensor([self.batch_size], self.type_sequence_label_size)
snake_case_ = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
snake_case_ = ids_tensor([self.batch_size], 2, dtype=tf.floataa)
snake_case_ = ids_tensor([self.batch_size], self.num_choices)
snake_case_ = FlaubertConfig(
vocab_size=self.vocab_size, n_special=self.n_special, emb_dim=self.hidden_size, n_layers=self.num_hidden_layers, n_heads=self.num_attention_heads, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, gelu_activation=self.gelu_activation, sinusoidal_embeddings=self.sinusoidal_embeddings, asm=self.asm, causal=self.causal, n_langs=self.n_langs, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, summary_type=self.summary_type, use_proj=self.use_proj, bos_token_id=self.bos_token_id, )
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, ) -> List[Any]:
snake_case_ = TFFlaubertModel(config=lowerCAmelCase__)
snake_case_ = {'input_ids': input_ids, 'lengths': input_lengths, 'langs': token_type_ids}
snake_case_ = model(lowerCAmelCase__)
snake_case_ = [input_ids, input_mask]
snake_case_ = model(lowerCAmelCase__)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, ) -> Tuple:
snake_case_ = TFFlaubertWithLMHeadModel(lowerCAmelCase__)
snake_case_ = {'input_ids': input_ids, 'lengths': input_lengths, 'langs': token_type_ids}
snake_case_ = model(lowerCAmelCase__)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, ) -> str:
snake_case_ = TFFlaubertForQuestionAnsweringSimple(lowerCAmelCase__)
snake_case_ = {'input_ids': input_ids, 'lengths': input_lengths}
snake_case_ = model(lowerCAmelCase__)
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, ) -> int:
snake_case_ = TFFlaubertForSequenceClassification(lowerCAmelCase__)
snake_case_ = {'input_ids': input_ids, 'lengths': input_lengths}
snake_case_ = model(lowerCAmelCase__)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, ) -> Optional[Any]:
snake_case_ = self.num_labels
snake_case_ = TFFlaubertForTokenClassification(config=lowerCAmelCase__)
snake_case_ = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
snake_case_ = model(lowerCAmelCase__)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, ) -> Tuple:
snake_case_ = self.num_choices
snake_case_ = TFFlaubertForMultipleChoice(config=lowerCAmelCase__)
snake_case_ = tf.tile(tf.expand_dims(lowerCAmelCase__, 1), (1, self.num_choices, 1))
snake_case_ = tf.tile(tf.expand_dims(lowerCAmelCase__, 1), (1, self.num_choices, 1))
snake_case_ = tf.tile(tf.expand_dims(lowerCAmelCase__, 1), (1, self.num_choices, 1))
snake_case_ = {
'input_ids': multiple_choice_inputs_ids,
'attention_mask': multiple_choice_input_mask,
'token_type_ids': multiple_choice_token_type_ids,
}
snake_case_ = model(lowerCAmelCase__)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
def a_ ( self) -> int:
snake_case_ = self.prepare_config_and_inputs()
snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ = config_and_inputs
snake_case_ = {
'input_ids': input_ids,
'token_type_ids': token_type_ids,
'langs': token_type_ids,
'lengths': input_lengths,
}
return config, inputs_dict
@require_tf
class UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ):
SCREAMING_SNAKE_CASE_ = (
(
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
TFFlaubertForSequenceClassification,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForTokenClassification,
TFFlaubertForMultipleChoice,
)
if is_tf_available()
else ()
)
SCREAMING_SNAKE_CASE_ = (
(TFFlaubertWithLMHeadModel,) if is_tf_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
SCREAMING_SNAKE_CASE_ = (
{
"feature-extraction": TFFlaubertModel,
"fill-mask": TFFlaubertWithLMHeadModel,
"question-answering": TFFlaubertForQuestionAnsweringSimple,
"text-classification": TFFlaubertForSequenceClassification,
"token-classification": TFFlaubertForTokenClassification,
"zero-shot": TFFlaubertForSequenceClassification,
}
if is_tf_available()
else {}
)
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = False
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__) -> List[str]:
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('Fast')
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def a_ ( self) -> List[Any]:
snake_case_ = TFFlaubertModelTester(self)
snake_case_ = ConfigTester(self, config_class=lowerCAmelCase__, emb_dim=37)
def a_ ( self) -> Optional[int]:
self.config_tester.run_common_tests()
def a_ ( self) -> List[Any]:
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*lowerCAmelCase__)
def a_ ( self) -> Optional[Any]:
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*lowerCAmelCase__)
def a_ ( self) -> Tuple:
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*lowerCAmelCase__)
def a_ ( self) -> Tuple:
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*lowerCAmelCase__)
def a_ ( self) -> List[Any]:
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_for_token_classification(*lowerCAmelCase__)
def a_ ( self) -> Optional[Any]:
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_for_multiple_choice(*lowerCAmelCase__)
@slow
def a_ ( self) -> Tuple:
for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case_ = TFFlaubertModel.from_pretrained(lowerCAmelCase__)
self.assertIsNotNone(lowerCAmelCase__)
@require_tf
@require_sentencepiece
@require_tokenizers
class UpperCamelCase ( unittest.TestCase ):
@slow
def a_ ( self) -> Union[str, Any]:
snake_case_ = TFFlaubertModel.from_pretrained('jplu/tf-flaubert-small-cased')
snake_case_ = tf.convert_to_tensor(
[[0, 158, 735, 2592, 1424, 6727, 82, 1]], dtype=tf.intaa, ) # "J'aime flaubert !"
snake_case_ = model(lowerCAmelCase__)[0]
snake_case_ = tf.TensorShape((1, 8, 512))
self.assertEqual(output.shape, lowerCAmelCase__)
# compare the actual values for a slice.
snake_case_ = tf.convert_to_tensor(
[
[
[-1.8768773, -1.566555, 0.27072418],
[-1.6920038, -0.5873505, 1.9329599],
[-2.9563985, -1.6993835, 1.7972052],
]
], dtype=tf.floataa, )
self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
| 352
|
"""simple docstring"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging
__UpperCamelCase = logging.get_logger(__name__)
__UpperCamelCase = {
'''CarlCochet/trajectory-transformer-halfcheetah-medium-v2''': (
'''https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json'''
),
# See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer
}
class UpperCamelCase ( lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE_ = "trajectory_transformer"
SCREAMING_SNAKE_CASE_ = ["past_key_values"]
SCREAMING_SNAKE_CASE_ = {
"hidden_size": "n_embd",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self, lowerCAmelCase__=100, lowerCAmelCase__=5, lowerCAmelCase__=1, lowerCAmelCase__=1, lowerCAmelCase__=249, lowerCAmelCase__=6, lowerCAmelCase__=17, lowerCAmelCase__=25, lowerCAmelCase__=4, lowerCAmelCase__=4, lowerCAmelCase__=128, lowerCAmelCase__=0.1, lowerCAmelCase__=0.1, lowerCAmelCase__=0.1, lowerCAmelCase__=0.0006, lowerCAmelCase__=512, lowerCAmelCase__=0.02, lowerCAmelCase__=1e-12, lowerCAmelCase__=1, lowerCAmelCase__=True, lowerCAmelCase__=1, lowerCAmelCase__=5_0256, lowerCAmelCase__=5_0256, **lowerCAmelCase__, ) -> Optional[Any]:
snake_case_ = vocab_size
snake_case_ = action_weight
snake_case_ = reward_weight
snake_case_ = value_weight
snake_case_ = max_position_embeddings
snake_case_ = block_size
snake_case_ = action_dim
snake_case_ = observation_dim
snake_case_ = transition_dim
snake_case_ = learning_rate
snake_case_ = n_layer
snake_case_ = n_head
snake_case_ = n_embd
snake_case_ = embd_pdrop
snake_case_ = attn_pdrop
snake_case_ = resid_pdrop
snake_case_ = initializer_range
snake_case_ = layer_norm_eps
snake_case_ = kaiming_initializer_range
snake_case_ = use_cache
super().__init__(pad_token_id=lowerCAmelCase__, bos_token_id=lowerCAmelCase__, eos_token_id=lowerCAmelCase__, **lowerCAmelCase__)
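# --- Added usage sketch (not part of the original file) ---
# `attribute_map` above lets generic names resolve to the GPT-style fields, so
# `config.hidden_size` reads `config.n_embd`. Minimal illustration of the mechanism
# (PretrainedConfig wires this up via __getattr__/__setattr__):
_sketch_attribute_map = {"hidden_size": "n_embd", "num_attention_heads": "n_head", "num_hidden_layers": "n_layer"}
class _SketchConfig:
    def __init__(self, n_embd=128, n_head=4, n_layer=4):
        self.n_embd, self.n_head, self.n_layer = n_embd, n_head, n_layer
    def __getattr__(self, name):
        if name in _sketch_attribute_map:
            return getattr(self, _sketch_attribute_map[name])
        raise AttributeError(name)
assert _SketchConfig().hidden_size == 128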
| 312
| 0
|
from math import isqrt
def UpperCAmelCase ( UpperCAmelCase ) -> list[int]:
snake_case_ = [True] * max_number
for i in range(2 , isqrt(max_number - 1 ) + 1 ):
if is_prime[i]:
for j in range(i**2 , UpperCAmelCase , UpperCAmelCase ):
snake_case_ = False
return [i for i in range(2 , UpperCAmelCase ) if is_prime[i]]
def UpperCAmelCase ( UpperCAmelCase = 10**8 ) -> int:
snake_case_ = calculate_prime_numbers(max_number // 2 )
snake_case_ = 0
snake_case_ = 0
snake_case_ = len(UpperCAmelCase ) - 1
while left <= right:
while prime_numbers[left] * prime_numbers[right] >= max_number:
right -= 1
semiprimes_count += right - left + 1
left += 1
return semiprimes_count
if __name__ == "__main__":
print(F"""{solution() = }""")
| 353
|
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class UpperCamelCase ( metaclass=lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE_ = ["keras_nlp"]
def __init__( self, *lowerCAmelCase__, **lowerCAmelCase__) -> int:
requires_backends(self, ['keras_nlp'])
| 312
| 0
|
"""simple docstring"""
import unittest
import torch
from diffusers import VQModel
from diffusers.utils import floats_tensor, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ):
SCREAMING_SNAKE_CASE_ = VQModel
SCREAMING_SNAKE_CASE_ = "sample"
@property
def a_ ( self, lowerCAmelCase__=(32, 32)) -> Dict:
snake_case_ = 4
snake_case_ = 3
snake_case_ = floats_tensor((batch_size, num_channels) + sizes).to(lowerCAmelCase__)
return {"sample": image}
@property
def a_ ( self) -> int:
return (3, 32, 32)
@property
def a_ ( self) -> Dict:
return (3, 32, 32)
def a_ ( self) -> Dict:
snake_case_ = {
'block_out_channels': [32, 64],
'in_channels': 3,
'out_channels': 3,
'down_block_types': ['DownEncoderBlock2D', 'DownEncoderBlock2D'],
'up_block_types': ['UpDecoderBlock2D', 'UpDecoderBlock2D'],
'latent_channels': 3,
}
snake_case_ = self.dummy_input
return init_dict, inputs_dict
def a_ ( self) -> Dict:
pass
def a_ ( self) -> Any:
pass
def a_ ( self) -> List[Any]:
snake_case_ , snake_case_ = VQModel.from_pretrained('fusing/vqgan-dummy', output_loading_info=lowerCAmelCase__)
self.assertIsNotNone(lowerCAmelCase__)
self.assertEqual(len(loading_info['missing_keys']), 0)
model.to(lowerCAmelCase__)
snake_case_ = model(**self.dummy_input)
assert image is not None, "Make sure output is not None"
def a_ ( self) -> Tuple:
snake_case_ = VQModel.from_pretrained('fusing/vqgan-dummy')
model.to(lowerCAmelCase__).eval()
torch.manual_seed(0)
if torch.cuda.is_available():
torch.cuda.manual_seed_all(0)
snake_case_ = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
snake_case_ = image.to(lowerCAmelCase__)
with torch.no_grad():
snake_case_ = model(lowerCAmelCase__).sample
snake_case_ = output[0, -1, -3:, -3:].flatten().cpu()
# fmt: off
snake_case_ = torch.tensor([-0.0153, -0.4044, -0.1880, -0.5161, -0.2418, -0.4072, -0.1612, -0.0633, -0.0143])
# fmt: on
self.assertTrue(torch.allclose(lowerCAmelCase__, lowerCAmelCase__, atol=1e-3))
| 354
|
"""simple docstring"""
import os
import numpy
import onnx
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> List[str]:
snake_case_ = a.name
snake_case_ = b.name
snake_case_ = ''
snake_case_ = ''
snake_case_ = a == b
snake_case_ = name_a
snake_case_ = name_b
return res
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> int:
for i, input_name in enumerate(node_proto.input ):
if input_name == name:
node_proto.input.insert(UpperCAmelCase , UpperCAmelCase )
node_proto.input.pop(i + 1 )
if node_proto.op_type == "If":
_graph_replace_input_with(node_proto.attribute[0].g , UpperCAmelCase , UpperCAmelCase )
_graph_replace_input_with(node_proto.attribute[1].g , UpperCAmelCase , UpperCAmelCase )
if node_proto.op_type == "Loop":
_graph_replace_input_with(node_proto.attribute[0].g , UpperCAmelCase , UpperCAmelCase )
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Union[str, Any]:
for n in graph_proto.node:
_node_replace_input_with(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Any:
snake_case_ = list(model.graph.initializer )
snake_case_ = list(model_without_ext.graph.initializer )
for i, ref_i in ind_to_replace:
assert inits_with_data[i].name == inits[i].name
assert inits_with_data[ref_i].name == inits[ref_i].name
assert i > ref_i
snake_case_ = inits[i].name
snake_case_ = inits[ref_i].name
model_without_ext.graph.initializer.remove(inits[i] )
_graph_replace_input_with(model_without_ext.graph , UpperCAmelCase , UpperCAmelCase )
def UpperCAmelCase ( UpperCAmelCase ) -> Optional[Any]:
snake_case_ = os.path.dirname(UpperCAmelCase )
snake_case_ = os.path.basename(UpperCAmelCase )
snake_case_ = onnx.load(os.path.join(UpperCAmelCase , UpperCAmelCase ) )
snake_case_ = list(model.graph.initializer )
snake_case_ = set()
snake_case_ = {}
snake_case_ = []
snake_case_ = 0
for i in range(len(UpperCAmelCase ) ):
if i in dup_set:
continue
for j in range(i + 1 , len(UpperCAmelCase ) ):
if j in dup_set:
continue
if _is_equal_tensor_proto(inits[i] , inits[j] ):
dup_set.add(UpperCAmelCase )
dup_set.add(UpperCAmelCase )
snake_case_ = inits[j].data_type
snake_case_ = numpy.prod(inits[j].dims )
if dtype == 1:
mem_size *= 4
elif dtype == 6:
mem_size *= 4
elif dtype == 7 or dtype == 11:
mem_size *= 8
else:
print('unexpected data type: ' , UpperCAmelCase )
total_reduced_size += mem_size
snake_case_ = inits[i].name
snake_case_ = inits[j].name
if name_i in dup_map:
dup_map[name_i].append(UpperCAmelCase )
else:
snake_case_ = [name_j]
ind_to_replace.append((j, i) )
print('total reduced size: ' , total_reduced_size / 1024 / 1024 / 1024 , 'GB' )
snake_case_ = sorted(UpperCAmelCase )
_remove_dup_initializers_from_model(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
snake_case_ = 'optimized_' + model_file_name
snake_case_ = os.path.join(UpperCAmelCase , UpperCAmelCase )
onnx.save(UpperCAmelCase , UpperCAmelCase )
return new_model
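# --- Added helper sketch (not part of the original file) ---
# The dtype branches above hard-code the byte widths of the ONNX TensorProto
# element types expected here (1 = FLOAT and 6 = INT32 -> 4 bytes, 7 = INT64 and
# 11 = DOUBLE -> 8 bytes). The same mapping as a table:
import numpy as _np
_SKETCH_DTYPE_BYTES = {1: 4, 6: 4, 7: 8, 11: 8}
def _sketch_tensor_bytes(data_type, dims):
    return int(_np.prod(dims)) * _SKETCH_DTYPE_BYTES.get(data_type, 0)
assert _sketch_tensor_bytes(1, [2, 3]) == 24  # six float32 elements
assert _sketch_tensor_bytes(7, [4]) == 32  # four int64 elements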
| 312
| 0
|
"""simple docstring"""
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class UpperCamelCase :
def __init__( self, lowerCAmelCase__, lowerCAmelCase__=13, lowerCAmelCase__=7, lowerCAmelCase__=True, lowerCAmelCase__=True, lowerCAmelCase__=True, lowerCAmelCase__=True, lowerCAmelCase__=99, lowerCAmelCase__=24, lowerCAmelCase__=2, lowerCAmelCase__=6, lowerCAmelCase__=37, lowerCAmelCase__="gelu", lowerCAmelCase__=0.1, lowerCAmelCase__=0.1, lowerCAmelCase__=512, lowerCAmelCase__=16, lowerCAmelCase__=2, lowerCAmelCase__=0.02, lowerCAmelCase__=3, lowerCAmelCase__=None, lowerCAmelCase__=1000, ) -> List[Any]:
snake_case_ = parent
snake_case_ = batch_size
snake_case_ = seq_length
snake_case_ = is_training
snake_case_ = use_input_mask
snake_case_ = use_token_type_ids
snake_case_ = use_labels
snake_case_ = vocab_size
snake_case_ = hidden_size
snake_case_ = num_hidden_layers
snake_case_ = num_attention_heads
snake_case_ = intermediate_size
snake_case_ = hidden_act
snake_case_ = hidden_dropout_prob
snake_case_ = attention_probs_dropout_prob
snake_case_ = max_position_embeddings
snake_case_ = type_vocab_size
snake_case_ = type_sequence_label_size
snake_case_ = initializer_range
snake_case_ = num_labels
snake_case_ = scope
snake_case_ = range_bbox
def a_ ( self) -> Tuple:
snake_case_ = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
snake_case_ = ids_tensor([self.batch_size, self.seq_length, 4], self.range_bbox)
# Ensure that bbox is legal
for i in range(bbox.shape[0]):
for j in range(bbox.shape[1]):
if bbox[i, j, 3] < bbox[i, j, 1]:
snake_case_ = bbox[i, j, 3]
snake_case_ = bbox[i, j, 1]
snake_case_ = t
if bbox[i, j, 2] < bbox[i, j, 0]:
snake_case_ = bbox[i, j, 2]
snake_case_ = bbox[i, j, 0]
snake_case_ = t
snake_case_ = None
if self.use_input_mask:
snake_case_ = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
snake_case_ = None
if self.use_token_type_ids:
snake_case_ = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
snake_case_ = None
snake_case_ = None
if self.use_labels:
snake_case_ = ids_tensor([self.batch_size], self.type_sequence_label_size)
snake_case_ = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
snake_case_ = self.get_config()
return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
def a_ ( self) -> Optional[Any]:
return LiltConfig(
vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, )
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, ) -> List[str]:
snake_case_ = LiltModel(config=lowerCAmelCase__)
model.to(lowerCAmelCase__)
model.eval()
snake_case_ = model(lowerCAmelCase__, bbox=lowerCAmelCase__, attention_mask=lowerCAmelCase__, token_type_ids=lowerCAmelCase__)
snake_case_ = model(lowerCAmelCase__, bbox=lowerCAmelCase__, token_type_ids=lowerCAmelCase__)
snake_case_ = model(lowerCAmelCase__, bbox=lowerCAmelCase__)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, ) -> List[Any]:
snake_case_ = self.num_labels
snake_case_ = LiltForTokenClassification(config=lowerCAmelCase__)
model.to(lowerCAmelCase__)
model.eval()
snake_case_ = model(
lowerCAmelCase__, bbox=lowerCAmelCase__, attention_mask=lowerCAmelCase__, token_type_ids=lowerCAmelCase__, labels=lowerCAmelCase__)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, ) -> Optional[int]:
snake_case_ = LiltForQuestionAnswering(config=lowerCAmelCase__)
model.to(lowerCAmelCase__)
model.eval()
snake_case_ = model(
lowerCAmelCase__, bbox=lowerCAmelCase__, attention_mask=lowerCAmelCase__, token_type_ids=lowerCAmelCase__, start_positions=lowerCAmelCase__, end_positions=lowerCAmelCase__, )
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
def a_ ( self) -> Dict:
snake_case_ = self.prepare_config_and_inputs()
snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ = config_and_inputs
snake_case_ = {
'input_ids': input_ids,
'bbox': bbox,
'token_type_ids': token_type_ids,
'attention_mask': input_mask,
}
return config, inputs_dict
@require_torch
class UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ):
SCREAMING_SNAKE_CASE_ = (
(
LiltModel,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltForQuestionAnswering,
)
if is_torch_available()
else ()
)
SCREAMING_SNAKE_CASE_ = (
{
"feature-extraction": LiltModel,
"question-answering": LiltForQuestionAnswering,
"text-classification": LiltForSequenceClassification,
"token-classification": LiltForTokenClassification,
"zero-shot": LiltForSequenceClassification,
}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = False
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__) -> str:
return True
def a_ ( self) -> str:
snake_case_ = LiltModelTester(self)
snake_case_ = ConfigTester(self, config_class=lowerCAmelCase__, hidden_size=37)
def a_ ( self) -> int:
self.config_tester.run_common_tests()
def a_ ( self) -> int:
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase__)
def a_ ( self) -> Dict:
snake_case_ = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
snake_case_ = type
self.model_tester.create_and_check_model(*lowerCAmelCase__)
def a_ ( self) -> int:
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowerCAmelCase__)
def a_ ( self) -> Optional[Any]:
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*lowerCAmelCase__)
@slow
def a_ ( self) -> Any:
for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case_ = LiltModel.from_pretrained(lowerCAmelCase__)
self.assertIsNotNone(lowerCAmelCase__)
@require_torch
@slow
class UpperCamelCase ( unittest.TestCase ):
def a_ ( self) -> Optional[Any]:
snake_case_ = LiltModel.from_pretrained('SCUT-DLVCLab/lilt-roberta-en-base').to(lowerCAmelCase__)
snake_case_ = torch.tensor([[1, 2]], device=lowerCAmelCase__)
snake_case_ = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]], device=lowerCAmelCase__)
# forward pass
with torch.no_grad():
snake_case_ = model(input_ids=lowerCAmelCase__, bbox=lowerCAmelCase__)
snake_case_ = torch.Size([1, 2, 768])
snake_case_ = torch.tensor(
[[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]], device=lowerCAmelCase__, )
        self.assertEqual(outputs.last_hidden_state.shape, lowerCAmelCase__)
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3], lowerCAmelCase__, atol=1e-3))
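# Hedged note on the integration inputs above: LiLT expects one (x0, y0, x1, y1) box per
# token, so `bbox` has shape (batch, seq_len, 4) and must line up token-for-token with
# `input_ids` of shape (batch, seq_len).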
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_barthez import BarthezTokenizer
else:
__UpperCamelCase = None
__UpperCamelCase = logging.get_logger(__name__)
__UpperCamelCase = {'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''}
__UpperCamelCase = {
'''vocab_file''': {
'''moussaKam/mbarthez''': '''https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model''',
'''moussaKam/barthez''': '''https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model''',
'''moussaKam/barthez-orangesum-title''': (
'''https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model'''
),
},
'''tokenizer_file''': {
'''moussaKam/mbarthez''': '''https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json''',
'''moussaKam/barthez''': '''https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json''',
'''moussaKam/barthez-orangesum-title''': (
'''https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json'''
),
},
}
__UpperCamelCase = {
'''moussaKam/mbarthez''': 1024,
'''moussaKam/barthez''': 1024,
'''moussaKam/barthez-orangesum-title''': 1024,
}
__UpperCamelCase = '''▁'''
class UpperCamelCase ( lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE_ = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE_ = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE_ = ["input_ids", "attention_mask"]
SCREAMING_SNAKE_CASE_ = BarthezTokenizer
def __init__( self, lowerCAmelCase__=None, lowerCAmelCase__=None, lowerCAmelCase__="<s>", lowerCAmelCase__="</s>", lowerCAmelCase__="</s>", lowerCAmelCase__="<s>", lowerCAmelCase__="<unk>", lowerCAmelCase__="<pad>", lowerCAmelCase__="<mask>", **lowerCAmelCase__, ) -> List[str]:
# Mask token behave like a normal word, i.e. include the space before it
snake_case_ = AddedToken(lowerCAmelCase__, lstrip=lowerCAmelCase__, rstrip=lowerCAmelCase__) if isinstance(lowerCAmelCase__, lowerCAmelCase__) else mask_token
super().__init__(
lowerCAmelCase__, tokenizer_file=lowerCAmelCase__, bos_token=lowerCAmelCase__, eos_token=lowerCAmelCase__, unk_token=lowerCAmelCase__, sep_token=lowerCAmelCase__, cls_token=lowerCAmelCase__, pad_token=lowerCAmelCase__, mask_token=lowerCAmelCase__, **lowerCAmelCase__, )
snake_case_ = vocab_file
snake_case_ = False if not self.vocab_file else True
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
snake_case_ = [self.cls_token_id]
snake_case_ = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None) -> List[int]:
snake_case_ = [self.sep_token_id]
snake_case_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None) -> Tuple[str]:
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.')
if not os.path.isdir(lowerCAmelCase__):
logger.error(f'Vocabulary path ({save_directory}) should be a directory')
return
snake_case_ = os.path.join(
lowerCAmelCase__, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
if os.path.abspath(self.vocab_file) != os.path.abspath(lowerCAmelCase__):
copyfile(self.vocab_file, lowerCAmelCase__)
return (out_vocab_file,)
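# Hedged, self-contained sketch of the special-token layout built above (the token ids are
# illustrative): a single sequence becomes <s> A </s>, a pair becomes <s> A </s></s> B </s>.
_cls, _sep = [0], [2]
_a, _b = [10, 11], [20]
assert _cls + _a + _sep + _sep + _b + _sep == [0, 10, 11, 2, 2, 20, 2]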
"""simple docstring"""
import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
__UpperCamelCase = logging.get_logger(__name__)
class UpperCamelCase :
def __init__( self, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__=None, lowerCAmelCase__=None) -> Optional[Any]:
if not conversation_id:
snake_case_ = uuid.uuida()
if past_user_inputs is None:
snake_case_ = []
if generated_responses is None:
snake_case_ = []
snake_case_ = conversation_id
snake_case_ = past_user_inputs
snake_case_ = generated_responses
snake_case_ = text
def __eq__( self, lowerCAmelCase__) -> List[str]:
if not isinstance(lowerCAmelCase__, lowerCAmelCase__):
return False
if self.uuid == other.uuid:
return True
return (
self.new_user_input == other.new_user_input
and self.past_user_inputs == other.past_user_inputs
and self.generated_responses == other.generated_responses
)
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = False) -> Any:
if self.new_user_input:
if overwrite:
logger.warning(
f'User input added while unprocessed input was existing: "{self.new_user_input}" was overwritten '
f'with: "{text}".')
snake_case_ = text
else:
logger.warning(
f'User input added while unprocessed input was existing: "{self.new_user_input}" new input '
f'ignored: "{text}". Set `overwrite` to True to overwrite unprocessed user input')
else:
snake_case_ = text
def a_ ( self) -> List[str]:
if self.new_user_input:
self.past_user_inputs.append(self.new_user_input)
snake_case_ = None
def a_ ( self, lowerCAmelCase__) -> List[str]:
self.generated_responses.append(lowerCAmelCase__)
def a_ ( self) -> int:
for user_input, generated_response in zip(self.past_user_inputs, self.generated_responses):
yield True, user_input
yield False, generated_response
if self.new_user_input:
yield True, self.new_user_input
def __repr__( self) -> Optional[int]:
snake_case_ = f'Conversation id: {self.uuid} \n'
for is_user, text in self.iter_texts():
snake_case_ = 'user' if is_user else 'bot'
output += f'{name} >> {text} \n'
return output
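# Hedged, self-contained sketch of the turn bookkeeping above (names are illustrative):
# a pending user input sits in `new_user_input` until the pipeline marks it processed,
# then it moves to `past_user_inputs` and the reply lands in `generated_responses`.
class _MiniConversation:
    def __init__(self):
        self.past_user_inputs, self.generated_responses = [], []
        self.new_user_input = None

    def add_user_input(self, text):
        self.new_user_input = text

    def mark_processed(self):
        self.past_user_inputs.append(self.new_user_input)
        self.new_user_input = None

_conv = _MiniConversation()
_conv.add_user_input('Hi there!')
_conv.mark_processed()
_conv.generated_responses.append('Hello! How can I help?')
assert _conv.past_user_inputs == ['Hi there!'] and _conv.new_user_input is None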
@add_end_docstrings(
lowerCAmelCase__ , R"\n min_length_for_response (`int`, *optional*, defaults to 32):\n The minimum length (in number of tokens) for a response.\n minimum_tokens (`int`, *optional*, defaults to 10):\n The minimum length of tokens to leave for a response.\n " , )
class UpperCamelCase ( lowerCAmelCase__ ):
def __init__( self, *lowerCAmelCase__, **lowerCAmelCase__) -> Union[str, Any]:
super().__init__(*lowerCAmelCase__, **lowerCAmelCase__)
if self.tokenizer.pad_token_id is None:
snake_case_ = self.tokenizer.eos_token
def a_ ( self, lowerCAmelCase__=None, lowerCAmelCase__=None, lowerCAmelCase__=None, **lowerCAmelCase__) -> Union[str, Any]:
snake_case_ = {}
snake_case_ = {}
snake_case_ = {}
if min_length_for_response is not None:
snake_case_ = min_length_for_response
if minimum_tokens is not None:
snake_case_ = minimum_tokens
if "max_length" in generate_kwargs:
snake_case_ = generate_kwargs['max_length']
# self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
if clean_up_tokenization_spaces is not None:
snake_case_ = clean_up_tokenization_spaces
if generate_kwargs:
forward_params.update(lowerCAmelCase__)
return preprocess_params, forward_params, postprocess_params
def __call__( self, lowerCAmelCase__, lowerCAmelCase__=0, **lowerCAmelCase__) -> Union[str, Any]:
snake_case_ = super().__call__(lowerCAmelCase__, num_workers=lowerCAmelCase__, **lowerCAmelCase__)
if isinstance(lowerCAmelCase__, lowerCAmelCase__) and len(lowerCAmelCase__) == 1:
return outputs[0]
return outputs
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__=32) -> Dict[str, Any]:
if not isinstance(lowerCAmelCase__, lowerCAmelCase__):
            raise ValueError('ConversationalPipeline expects a Conversation as input')
if conversation.new_user_input is None:
raise ValueError(
                f'Conversation with UUID {conversation.uuid} does not contain new user input to process. '
'Add user inputs with the conversation\'s `add_user_input` method')
if hasattr(self.tokenizer, '_build_conversation_input_ids'):
snake_case_ = self.tokenizer._build_conversation_input_ids(lowerCAmelCase__)
else:
# If the tokenizer cannot handle conversations, we default to only the old version
snake_case_ = self._legacy_parse_and_tokenize(lowerCAmelCase__)
if self.framework == "pt":
snake_case_ = torch.LongTensor([input_ids])
elif self.framework == "tf":
snake_case_ = tf.constant([input_ids])
return {"input_ids": input_ids, "conversation": conversation}
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__=10, **lowerCAmelCase__) -> Optional[Any]:
snake_case_ = generate_kwargs.get('max_length', self.model.config.max_length)
snake_case_ = model_inputs['input_ids'].shape[1]
if max_length - minimum_tokens < n:
            logger.warning(f'Conversation input is too long ({n}), trimming it to ({max_length} - {minimum_tokens})')
snake_case_ = max_length - minimum_tokens
snake_case_ = model_inputs['input_ids'][:, -trim:]
if "attention_mask" in model_inputs:
snake_case_ = model_inputs['attention_mask'][:, -trim:]
snake_case_ = model_inputs.pop('conversation')
snake_case_ = max_length
snake_case_ = self.model.generate(**lowerCAmelCase__, **lowerCAmelCase__)
if self.model.config.is_encoder_decoder:
snake_case_ = 1
else:
snake_case_ = n
return {"output_ids": output_ids[:, start_position:], "conversation": conversation}
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__=True) -> List[str]:
snake_case_ = model_outputs['output_ids']
snake_case_ = self.tokenizer.decode(
output_ids[0], skip_special_tokens=lowerCAmelCase__, clean_up_tokenization_spaces=lowerCAmelCase__, )
snake_case_ = model_outputs['conversation']
conversation.mark_processed()
conversation.append_response(lowerCAmelCase__)
return conversation
def a_ ( self, lowerCAmelCase__) -> Dict:
snake_case_ = self.tokenizer.eos_token_id
snake_case_ = []
for is_user, text in conversation.iter_texts():
if eos_token_id is not None:
input_ids.extend(self.tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__) + [eos_token_id])
else:
input_ids.extend(self.tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__))
if len(lowerCAmelCase__) > self.tokenizer.model_max_length:
snake_case_ = input_ids[-self.tokenizer.model_max_length :]
return input_ids
"""simple docstring"""
import functools
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> int:
# Validation
if not isinstance(UpperCAmelCase , UpperCAmelCase ) or not all(isinstance(UpperCAmelCase , UpperCAmelCase ) for day in days ):
raise ValueError('The parameter days should be a list of integers' )
if len(UpperCAmelCase ) != 3 or not all(isinstance(UpperCAmelCase , UpperCAmelCase ) for cost in costs ):
raise ValueError('The parameter costs should be a list of three integers' )
if len(UpperCAmelCase ) == 0:
return 0
if min(UpperCAmelCase ) <= 0:
raise ValueError('All days elements should be greater than 0' )
if max(UpperCAmelCase ) >= 366:
raise ValueError('All days elements should be less than 366' )
snake_case_ = set(UpperCAmelCase )
@functools.cache
def dynamic_programming(UpperCAmelCase ) -> int:
if index > 365:
return 0
if index not in days_set:
return dynamic_programming(index + 1 )
return min(
costs[0] + dynamic_programming(index + 1 ) , costs[1] + dynamic_programming(index + 7 ) , costs[2] + dynamic_programming(index + 30 ) , )
return dynamic_programming(1 )
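# Hedged, self-contained sketch of the memoized day-by-day recursion above; the day and
# cost values are illustrative, not from the source.
def _min_ticket_cost(travel_days, pass_costs):
    day_set = set(travel_days)
    last_day = max(travel_days)

    @functools.cache
    def dp(day):
        if day > last_day:
            return 0
        if day not in day_set:
            return dp(day + 1)  # no travel today: move on for free
        # buy a 1-, 7- or 30-day pass and keep the cheapest outcome
        return min(pass_costs[0] + dp(day + 1), pass_costs[1] + dp(day + 7), pass_costs[2] + dp(day + 30))

    return dp(1)

# A 7-day pass covers days 1-7, then one 1-day ticket each for days 8 and 20: 7 + 2 + 2.
assert _min_ticket_cost([1, 4, 6, 7, 8, 20], [2, 7, 15]) == 11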
if __name__ == "__main__":
import doctest
doctest.testmod()
import warnings
from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor
__UpperCamelCase = logging.get_logger(__name__)
class UpperCamelCase ( lowerCAmelCase__ ):
def __init__( self, *lowerCAmelCase__, **lowerCAmelCase__) -> None:
warnings.warn(
'The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
' use GLPNImageProcessor instead.', lowerCAmelCase__, )
super().__init__(*lowerCAmelCase__, **lowerCAmelCase__)
"""simple docstring"""
import copy
import re
class UpperCamelCase :
SCREAMING_SNAKE_CASE_ = "hp"
SCREAMING_SNAKE_CASE_ = {}
SCREAMING_SNAKE_CASE_ = None
@classmethod
def a_ ( cls, lowerCAmelCase__, lowerCAmelCase__) -> Tuple:
snake_case_ = prefix
snake_case_ = defaults
cls.build_naming_info()
@staticmethod
def a_ ( lowerCAmelCase__, lowerCAmelCase__) -> Optional[Any]:
if len(lowerCAmelCase__) == 0:
return ""
snake_case_ = None
if any(char.isdigit() for char in word):
raise Exception(f'Parameters should not contain numbers: \'{word}\' contains a number')
if word in info["short_word"]:
return info["short_word"][word]
for prefix_len in range(1, len(lowerCAmelCase__) + 1):
snake_case_ = word[:prefix_len]
if prefix in info["reverse_short_word"]:
continue
else:
snake_case_ = prefix
break
if short_word is None:
# Paranoid fallback
def int_to_alphabetic(lowerCAmelCase__):
snake_case_ = ''
while integer != 0:
snake_case_ = chr(ord('A') + integer % 10) + s
integer //= 10
return s
snake_case_ = 0
while True:
snake_case_ = word + '#' + int_to_alphabetic(lowerCAmelCase__)
if sword in info["reverse_short_word"]:
continue
else:
snake_case_ = sword
break
snake_case_ = short_word
snake_case_ = word
return short_word
@staticmethod
def a_ ( lowerCAmelCase__, lowerCAmelCase__) -> Dict:
snake_case_ = param_name.split('_')
snake_case_ = [TrialShortNamer.shortname_for_word(lowerCAmelCase__, lowerCAmelCase__) for word in words]
# We try to create a separatorless short name, but if there is a collision we have to fallback
# to a separated short name
snake_case_ = ['', '_']
for separator in separators:
snake_case_ = separator.join(lowerCAmelCase__)
if shortname not in info["reverse_short_param"]:
snake_case_ = shortname
snake_case_ = param_name
return shortname
return param_name
@staticmethod
def a_ ( lowerCAmelCase__, lowerCAmelCase__) -> List[Any]:
snake_case_ = TrialShortNamer.shortname_for_key(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = short_name
snake_case_ = param_name
@classmethod
def a_ ( cls) -> List[str]:
if cls.NAMING_INFO is not None:
return
snake_case_ = {
'short_word': {},
'reverse_short_word': {},
'short_param': {},
'reverse_short_param': {},
}
snake_case_ = list(cls.DEFAULTS.keys())
for k in field_keys:
cls.add_new_param_name(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = info
@classmethod
def a_ ( cls, lowerCAmelCase__) -> List[Any]:
cls.build_naming_info()
assert cls.PREFIX is not None
snake_case_ = [copy.copy(cls.PREFIX)]
for k, v in params.items():
if k not in cls.DEFAULTS:
raise Exception(f'You should provide a default value for the param name {k} with value {v}')
if v == cls.DEFAULTS[k]:
# The default value is not added to the name
continue
snake_case_ = cls.NAMING_INFO['short_param'][k]
if isinstance(lowerCAmelCase__, lowerCAmelCase__):
snake_case_ = 1 if v else 0
snake_case_ = '' if isinstance(lowerCAmelCase__, (int, float)) else '-'
snake_case_ = f'{key}{sep}{v}'
name.append(lowerCAmelCase__)
return "_".join(lowerCAmelCase__)
@classmethod
def a_ ( cls, lowerCAmelCase__) -> Optional[Any]:
snake_case_ = repr[len(cls.PREFIX) + 1 :]
if repr == "":
snake_case_ = []
else:
snake_case_ = repr.split('_')
snake_case_ = {}
for value in values:
if "-" in value:
snake_case_ , snake_case_ = value.split('-')
else:
snake_case_ = re.sub('[0-9.]', '', lowerCAmelCase__)
snake_case_ = float(re.sub('[^0-9.]', '', lowerCAmelCase__))
snake_case_ = cls.NAMING_INFO['reverse_short_param'][p_k]
snake_case_ = p_v
for k in cls.DEFAULTS:
if k not in parameters:
snake_case_ = cls.DEFAULTS[k]
return parameters
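# Hedged, self-contained sketch of the short-naming idea above, simplified to whole
# parameter names (the class shortens each underscore-separated word); the names,
# defaults and trial values are illustrative.
_defaults = {'learning_rate': 3e-4, 'batch_size': 32}
_short = {}
for _param in _defaults:
    for _n in range(1, len(_param) + 1):  # shortest prefix not already taken
        if _param[:_n] not in _short.values():
            _short[_param] = _param[:_n]
            break
# only values that differ from the defaults make it into the run name
_trial = {'learning_rate': 1e-3}
_run_name = 'hp_' + '_'.join(f'{_short[k]}{v}' for k, v in _trial.items())
assert _run_name == 'hp_l0.001'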
"""simple docstring"""
from typing import Dict, Iterable, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
__UpperCamelCase = logging.get_logger(__name__)
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> List[str]:
return [
int(1000 * (box[0] / width) ),
int(1000 * (box[1] / height) ),
int(1000 * (box[2] / width) ),
int(1000 * (box[3] / height) ),
]
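# Hedged, self-contained sketch of the 0-1000 normalization above (the pixel values are
# illustrative): boxes become resolution-independent, which is what LayoutLM-style
# models expect.
def _normalize_box(box, width, height):
    return [int(1000 * (box[0] / width)), int(1000 * (box[1] / height)),
            int(1000 * (box[2] / width)), int(1000 * (box[3] / height))]

assert _normalize_box((50, 25, 150, 75), 200, 100) == [250, 250, 750, 750]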
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> str:
snake_case_ = to_pil_image(UpperCAmelCase )
snake_case_ , snake_case_ = pil_image.size
snake_case_ = pytesseract.image_to_data(UpperCAmelCase , lang=UpperCAmelCase , output_type='dict' , config=UpperCAmelCase )
snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ = data['text'], data['left'], data['top'], data['width'], data['height']
# filter empty words and corresponding coordinates
snake_case_ = [idx for idx, word in enumerate(UpperCAmelCase ) if not word.strip()]
snake_case_ = [word for idx, word in enumerate(UpperCAmelCase ) if idx not in irrelevant_indices]
snake_case_ = [coord for idx, coord in enumerate(UpperCAmelCase ) if idx not in irrelevant_indices]
snake_case_ = [coord for idx, coord in enumerate(UpperCAmelCase ) if idx not in irrelevant_indices]
snake_case_ = [coord for idx, coord in enumerate(UpperCAmelCase ) if idx not in irrelevant_indices]
snake_case_ = [coord for idx, coord in enumerate(UpperCAmelCase ) if idx not in irrelevant_indices]
# turn coordinates into (left, top, left+width, top+height) format
snake_case_ = []
for x, y, w, h in zip(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ):
snake_case_ = [x, y, x + w, y + h]
actual_boxes.append(UpperCAmelCase )
# finally, normalize the bounding boxes
snake_case_ = []
for box in actual_boxes:
normalized_boxes.append(normalize_box(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) )
assert len(UpperCAmelCase ) == len(UpperCAmelCase ), "Not as many words as there are bounding boxes"
return words, normalized_boxes
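# Hedged sketch of the empty-word filtering above (the OCR rows are illustrative):
# indices of blank detections are dropped from the words and from every coordinate list
# in lockstep, so words and boxes stay aligned.
_ocr_words = ['Hello', ' ', 'world']
_ocr_left = [10, 55, 70]
_blank = {i for i, w in enumerate(_ocr_words) if not w.strip()}
_ocr_words = [w for i, w in enumerate(_ocr_words) if i not in _blank]
_ocr_left = [x for i, x in enumerate(_ocr_left) if i not in _blank]
assert _ocr_words == ['Hello', 'world'] and _ocr_left == [10, 70]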
class UpperCamelCase ( lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE_ = ["pixel_values"]
def __init__( self, lowerCAmelCase__ = True, lowerCAmelCase__ = None, lowerCAmelCase__ = PILImageResampling.BILINEAR, lowerCAmelCase__ = True, lowerCAmelCase__ = 1 / 255, lowerCAmelCase__ = True, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__ = True, lowerCAmelCase__ = None, lowerCAmelCase__ = "", **lowerCAmelCase__, ) -> None:
super().__init__(**lowerCAmelCase__)
snake_case_ = size if size is not None else {'height': 224, 'width': 224}
snake_case_ = get_size_dict(lowerCAmelCase__)
snake_case_ = do_resize
snake_case_ = size
snake_case_ = resample
snake_case_ = do_rescale
snake_case_ = rescale_value
snake_case_ = do_normalize
snake_case_ = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
snake_case_ = image_std if image_std is not None else IMAGENET_STANDARD_STD
snake_case_ = apply_ocr
snake_case_ = ocr_lang
snake_case_ = tesseract_config
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__ = PILImageResampling.BILINEAR, lowerCAmelCase__ = None, **lowerCAmelCase__, ) -> np.ndarray:
snake_case_ = get_size_dict(lowerCAmelCase__)
if "height" not in size or "width" not in size:
raise ValueError(f'The size dictionary must contain the keys \'height\' and \'width\'. Got {size.keys()}')
snake_case_ = (size['height'], size['width'])
return resize(lowerCAmelCase__, size=lowerCAmelCase__, resample=lowerCAmelCase__, data_format=lowerCAmelCase__, **lowerCAmelCase__)
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__ = None, **lowerCAmelCase__, ) -> np.ndarray:
return rescale(lowerCAmelCase__, scale=lowerCAmelCase__, data_format=lowerCAmelCase__, **lowerCAmelCase__)
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__ = None, **lowerCAmelCase__, ) -> np.ndarray:
return normalize(lowerCAmelCase__, mean=lowerCAmelCase__, std=lowerCAmelCase__, data_format=lowerCAmelCase__, **lowerCAmelCase__)
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__=None, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__ = ChannelDimension.FIRST, **lowerCAmelCase__, ) -> PIL.Image.Image:
snake_case_ = do_resize if do_resize is not None else self.do_resize
snake_case_ = size if size is not None else self.size
snake_case_ = get_size_dict(lowerCAmelCase__)
snake_case_ = resample if resample is not None else self.resample
snake_case_ = do_rescale if do_rescale is not None else self.do_rescale
snake_case_ = rescale_factor if rescale_factor is not None else self.rescale_factor
snake_case_ = do_normalize if do_normalize is not None else self.do_normalize
snake_case_ = image_mean if image_mean is not None else self.image_mean
snake_case_ = image_std if image_std is not None else self.image_std
snake_case_ = apply_ocr if apply_ocr is not None else self.apply_ocr
snake_case_ = ocr_lang if ocr_lang is not None else self.ocr_lang
snake_case_ = tesseract_config if tesseract_config is not None else self.tesseract_config
snake_case_ = make_list_of_images(lowerCAmelCase__)
if not valid_images(lowerCAmelCase__):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.')
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.')
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.')
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('If do_normalize is True, image_mean and image_std must be specified.')
# All transformations expect numpy arrays.
snake_case_ = [to_numpy_array(lowerCAmelCase__) for image in images]
# Tesseract OCR to get words + normalized bounding boxes
if apply_ocr:
requires_backends(self, 'pytesseract')
snake_case_ = []
snake_case_ = []
for image in images:
snake_case_ , snake_case_ = apply_tesseract(lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__)
words_batch.append(lowerCAmelCase__)
boxes_batch.append(lowerCAmelCase__)
if do_resize:
snake_case_ = [self.resize(image=lowerCAmelCase__, size=lowerCAmelCase__, resample=lowerCAmelCase__) for image in images]
if do_rescale:
snake_case_ = [self.rescale(image=lowerCAmelCase__, scale=lowerCAmelCase__) for image in images]
if do_normalize:
snake_case_ = [self.normalize(image=lowerCAmelCase__, mean=lowerCAmelCase__, std=lowerCAmelCase__) for image in images]
snake_case_ = [to_channel_dimension_format(lowerCAmelCase__, lowerCAmelCase__) for image in images]
snake_case_ = BatchFeature(data={'pixel_values': images}, tensor_type=lowerCAmelCase__)
if apply_ocr:
snake_case_ = words_batch
snake_case_ = boxes_batch
return data
"""simple docstring"""
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
@pytest.mark.parametrize(
'files' , [
['full:README.md', 'dataset_infos.json'],
['empty:README.md', 'dataset_infos.json'],
['dataset_infos.json'],
['full:README.md'],
] , )
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> List[str]:
snake_case_ = tmp_path_factory.mktemp('dset_infos_dir' )
if "full:README.md" in files:
with open(dataset_infos_dir / 'README.md' , 'w' ) as f:
f.write('---\ndataset_info:\n dataset_size: 42\n---' )
if "empty:README.md" in files:
with open(dataset_infos_dir / 'README.md' , 'w' ) as f:
f.write('' )
# we want to support dataset_infos.json for backward compatibility
if "dataset_infos.json" in files:
with open(dataset_infos_dir / 'dataset_infos.json' , 'w' ) as f:
f.write('{"default": {"dataset_size": 42}}' )
snake_case_ = DatasetInfosDict.from_directory(UpperCAmelCase )
assert dataset_infos
assert dataset_infos["default"].dataset_size == 42
@pytest.mark.parametrize(
'dataset_info' , [
DatasetInfo(),
DatasetInfo(
description='foo' , features=Features({'a': Value('int32' )} ) , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train'}] , download_size=42 , ),
] , )
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> Optional[Any]:
snake_case_ = str(UpperCAmelCase )
dataset_info.write_to_directory(UpperCAmelCase )
snake_case_ = DatasetInfo.from_directory(UpperCAmelCase )
assert dataset_info == reloaded
assert os.path.exists(os.path.join(UpperCAmelCase , 'dataset_info.json' ) )
def UpperCAmelCase ( ) -> Union[str, Any]:
snake_case_ = DatasetInfo(
description='foo' , citation='bar' , homepage='https://foo.bar' , license='CC0' , features=Features({'a': Value('int32' )} ) , post_processed={} , supervised_keys=() , task_templates=[] , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train', 'num_examples': 42}] , download_checksums={} , download_size=1337 , post_processing_size=442 , dataset_size=1234 , size_in_bytes=1337 + 442 + 1234 , )
snake_case_ = dataset_info._to_yaml_dict()
assert sorted(UpperCAmelCase ) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML )
for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
assert key in dataset_info_yaml_dict
assert isinstance(dataset_info_yaml_dict[key] , (list, dict, int, str) )
snake_case_ = yaml.safe_dump(UpperCAmelCase )
snake_case_ = yaml.safe_load(UpperCAmelCase )
assert dataset_info_yaml_dict == reloaded
def UpperCAmelCase ( ) -> Optional[Any]:
snake_case_ = DatasetInfo()
snake_case_ = dataset_info._to_yaml_dict()
assert dataset_info_yaml_dict == {}
@pytest.mark.parametrize(
'dataset_infos_dict' , [
DatasetInfosDict(),
DatasetInfosDict({'default': DatasetInfo()} ),
DatasetInfosDict({'my_config_name': DatasetInfo()} ),
DatasetInfosDict(
{
'default': DatasetInfo(
description='foo' , features=Features({'a': Value('int32' )} ) , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train'}] , download_size=42 , )
} ),
DatasetInfosDict(
{
'v1': DatasetInfo(dataset_size=42 ),
'v2': DatasetInfo(dataset_size=1337 ),
} ),
] , )
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> List[str]:
snake_case_ = str(UpperCAmelCase )
dataset_infos_dict.write_to_directory(UpperCAmelCase )
snake_case_ = DatasetInfosDict.from_directory(UpperCAmelCase )
# the config_name of the dataset_infos_dict take over the attribute
for config_name, dataset_info in dataset_infos_dict.items():
snake_case_ = config_name
# the yaml representation doesn't include fields like description or citation
# so we just test that we can recover what we can from the yaml
snake_case_ = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict() )
assert dataset_infos_dict == reloaded
if dataset_infos_dict:
assert os.path.exists(os.path.join(UpperCAmelCase , 'README.md' ) )
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from torch import nn
from transformers import (
SpeechaTextaConfig,
SpeechaTextaForCausalLM,
SpeechaTextaTokenizer,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
__UpperCamelCase = logging.get_logger(__name__)
__UpperCamelCase = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
}
__UpperCamelCase = [
'''lm_head''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
]
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Optional[int]:
for attribute in key.split('.' ):
snake_case_ = getattr(UpperCAmelCase , UpperCAmelCase )
if weight_type is not None:
snake_case_ = getattr(UpperCAmelCase , UpperCAmelCase ).shape
else:
snake_case_ = hf_pointer.shape
assert hf_shape == value.shape, (
f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
f' {value.shape} for {full_name}'
)
if weight_type == "weight":
snake_case_ = value
elif weight_type == "weight_g":
snake_case_ = value
elif weight_type == "weight_v":
snake_case_ = value
elif weight_type == "bias":
snake_case_ = value
else:
snake_case_ = value
logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' )
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> List[Any]:
snake_case_ = []
snake_case_ = fairseq_model.state_dict()
snake_case_ = hf_model.feature_extractor
# if encoder has different dim to decoder -> use proj_weight
snake_case_ = None
for name, value in fairseq_dict.items():
snake_case_ = False
if "conv_layers" in name:
load_conv_layer(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , hf_model.config.feat_extract_norm == 'group' , )
snake_case_ = True
elif name.split('.' )[0] == "proj":
snake_case_ = fairseq_model.proj
snake_case_ = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
snake_case_ = True
if "*" in mapped_key:
snake_case_ = name.split(UpperCAmelCase )[0].split('.' )[-2]
snake_case_ = mapped_key.replace('*' , UpperCAmelCase )
if "weight_g" in name:
snake_case_ = 'weight_g'
elif "weight_v" in name:
snake_case_ = 'weight_v'
elif "bias" in name:
snake_case_ = 'bias'
elif "weight" in name:
snake_case_ = 'weight'
else:
snake_case_ = None
set_recursively(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
continue
if not is_used:
unused_weights.append(UpperCAmelCase )
logger.warning(f'Unused weights: {unused_weights}' )
return proj_weight
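# Hedged sketch of the '*'-wildcard renaming used above (the names are illustrative, and
# the index extraction is simplified relative to the source): the fairseq layer index is
# recovered from the checkpoint key and substituted into the HF-side template.
_template = 'encoder.layers.*.attention.k_proj'
_fairseq_name = 'encoder.layers.3.self_attn.k_proj.weight'
_layer_index = _fairseq_name.split('encoder.layers.')[-1].split('.')[0]
assert _template.replace('*', _layer_index) == 'encoder.layers.3.attention.k_proj'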
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Dict:
snake_case_ = full_name.split('conv_layers.' )[-1]
snake_case_ = name.split('.' )
snake_case_ = int(items[0] )
snake_case_ = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'
)
snake_case_ = value
logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'
)
snake_case_ = value
logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f'{full_name} has size {value.shape}, but {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was'
" found."
)
snake_case_ = value
            logger.info(f'Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f'{full_name} has size {value.shape}, but'
                f' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.'
)
snake_case_ = value
logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
else:
unused_weights.append(UpperCAmelCase )
def UpperCAmelCase ( UpperCAmelCase ) -> Any:
snake_case_ , snake_case_ = emb.weight.shape
snake_case_ = nn.Linear(UpperCAmelCase , UpperCAmelCase , bias=UpperCAmelCase )
snake_case_ = emb.weight.data
return lin_layer
def UpperCAmelCase ( UpperCAmelCase ) -> str:
with open(UpperCAmelCase , 'r' , encoding='utf-8' ) as f:
snake_case_ = f.readlines()
snake_case_ = [line.split(' ' )[0] for line in lines]
snake_case_ = len(UpperCAmelCase )
snake_case_ = {
'<s>': 0,
'<pad>': 1,
'</s>': 2,
'<unk>': 3,
}
vocab_dict.update(dict(zip(UpperCAmelCase , range(4 , num_words + 4 ) ) ) )
return vocab_dict
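# Hedged sketch of the vocabulary construction above (the dict lines are illustrative):
# fairseq dict files hold one '<token> <count>' entry per line; ids 0-3 are reserved for
# the special tokens and the remaining tokens are appended from id 4 onward.
_dict_lines = ['hello 120', 'world 57']
_tokens = [line.split(' ')[0] for line in _dict_lines]
_vocab = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
_vocab.update(dict(zip(_tokens, range(4, 4 + len(_tokens)))))
assert _vocab['world'] == 5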
@torch.no_grad()
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , ) -> Union[str, Any]:
snake_case_ = WavaVecaConfig.from_pretrained(UpperCAmelCase )
snake_case_ = SpeechaTextaConfig.from_pretrained(
UpperCAmelCase , vocab_size=UpperCAmelCase , decoder_layers=UpperCAmelCase , do_stable_layer_norm=UpperCAmelCase )
snake_case_ = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=UpperCAmelCase , return_attention_mask=UpperCAmelCase , )
snake_case_ , snake_case_ , snake_case_ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} )
snake_case_ = model[0].eval()
# set weights for wav2vec2 encoder
snake_case_ = WavaVecaModel(UpperCAmelCase )
snake_case_ = recursively_load_weights_wavaveca(model.encoder , UpperCAmelCase )
snake_case_ = SpeechaTextaForCausalLM(UpperCAmelCase )
snake_case_ , snake_case_ = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=UpperCAmelCase )
# set output linear layer
unexpected_keys.remove('embed_out' )
snake_case_ = nn.Parameter(model.decoder.embed_out.detach() )
# layer norm is init to identity matrix so leaving it is fine
logger.warning(f'The following keys are missing when loading the decoder weights: {missing_keys}' )
logger.warning(f'The following keys are unexpected when loading the decoder weights: {unexpected_keys}' )
snake_case_ = SpeechEncoderDecoderModel(encoder=UpperCAmelCase , decoder=UpperCAmelCase )
snake_case_ = False
# add projection layer
snake_case_ = nn.Parameter(projection_layer.weight )
snake_case_ = nn.Parameter(projection_layer.bias )
snake_case_ = create_vocab_dict(UpperCAmelCase )
with open(os.path.join(UpperCAmelCase , 'vocab.json' ) , 'w' ) as fp:
json.dump(UpperCAmelCase , UpperCAmelCase )
snake_case_ = SpeechaTextaTokenizer(os.path.join(UpperCAmelCase , 'vocab.json' ) )
tokenizer.save_pretrained(UpperCAmelCase )
snake_case_ = hf_wavavec.config.to_dict()
snake_case_ = tokenizer.pad_token_id
snake_case_ = tokenizer.bos_token_id
snake_case_ = tokenizer.eos_token_id
snake_case_ = 'speech_to_text_2'
snake_case_ = 'wav2vec2'
snake_case_ = SpeechEncoderDecoderConfig.from_dict(UpperCAmelCase )
hf_wavavec.save_pretrained(UpperCAmelCase )
feature_extractor.save_pretrained(UpperCAmelCase )
if __name__ == "__main__":
__UpperCamelCase = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument(
'''--encoder_config_path''',
default='''facebook/wav2vec2-large-lv60''',
type=str,
help='''Path to hf encoder wav2vec2 checkpoint config''',
)
parser.add_argument(
'''--decoder_config_path''',
default='''facebook/s2t-small-mustc-en-fr-st''',
type=str,
help='''Path to hf decoder s2t checkpoint config''',
)
parser.add_argument('''--vocab_size''', default=1_0224, type=int, help='''Vocab size of decoder''')
parser.add_argument('''--num_decoder_layers''', default=7, type=int, help='''Number of decoder layers''')
__UpperCamelCase = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
vocab_size=args.vocab_size,
num_decoder_layers=args.num_decoder_layers,
)
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ):
SCREAMING_SNAKE_CASE_ = StableDiffusionInpaintPipeline
SCREAMING_SNAKE_CASE_ = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
SCREAMING_SNAKE_CASE_ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
SCREAMING_SNAKE_CASE_ = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
SCREAMING_SNAKE_CASE_ = frozenset([] )
def a_ ( self) -> Any:
torch.manual_seed(0)
snake_case_ = UNetaDConditionModel(
block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=9, out_channels=4, down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D'), up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D'), cross_attention_dim=32, attention_head_dim=(2, 4), use_linear_projection=lowerCAmelCase__, )
snake_case_ = PNDMScheduler(skip_prk_steps=lowerCAmelCase__)
torch.manual_seed(0)
snake_case_ = AutoencoderKL(
block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'], up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'], latent_channels=4, sample_size=128, )
torch.manual_seed(0)
snake_case_ = CLIPTextConfig(
bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act='gelu', projection_dim=512, )
snake_case_ = CLIPTextModel(lowerCAmelCase__)
snake_case_ = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
snake_case_ = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__=0) -> List[str]:
# TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched
snake_case_ = floats_tensor((1, 3, 32, 32), rng=random.Random(lowerCAmelCase__)).to(lowerCAmelCase__)
snake_case_ = image.cpu().permute(0, 2, 3, 1)[0]
snake_case_ = Image.fromarray(np.uinta(lowerCAmelCase__)).convert('RGB').resize((64, 64))
snake_case_ = Image.fromarray(np.uinta(image + 4)).convert('RGB').resize((64, 64))
if str(lowerCAmelCase__).startswith('mps'):
snake_case_ = torch.manual_seed(lowerCAmelCase__)
else:
snake_case_ = torch.Generator(device=lowerCAmelCase__).manual_seed(lowerCAmelCase__)
snake_case_ = {
'prompt': 'A painting of a squirrel eating a burger',
'image': init_image,
'mask_image': mask_image,
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def a_ ( self) -> Dict:
snake_case_ = 'cpu' # ensure determinism for the device-dependent torch.Generator
snake_case_ = self.get_dummy_components()
snake_case_ = StableDiffusionInpaintPipeline(**lowerCAmelCase__)
snake_case_ = sd_pipe.to(lowerCAmelCase__)
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase__)
snake_case_ = self.get_dummy_inputs(lowerCAmelCase__)
snake_case_ = sd_pipe(**lowerCAmelCase__).images
snake_case_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
snake_case_ = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
def a_ ( self) -> Union[str, Any]:
super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class UpperCamelCase ( unittest.TestCase ):
def a_ ( self) -> Optional[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a_ ( self) -> Union[str, Any]:
snake_case_ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-inpaint/init_image.png')
snake_case_ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png')
snake_case_ = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'
'/yellow_cat_sitting_on_a_park_bench.npy')
snake_case_ = 'stabilityai/stable-diffusion-2-inpainting'
snake_case_ = StableDiffusionInpaintPipeline.from_pretrained(lowerCAmelCase__, safety_checker=lowerCAmelCase__)
pipe.to(lowerCAmelCase__)
pipe.set_progress_bar_config(disable=lowerCAmelCase__)
pipe.enable_attention_slicing()
snake_case_ = 'Face of a yellow cat, high resolution, sitting on a park bench'
snake_case_ = torch.manual_seed(0)
snake_case_ = pipe(
prompt=lowerCAmelCase__, image=lowerCAmelCase__, mask_image=lowerCAmelCase__, generator=lowerCAmelCase__, output_type='np', )
snake_case_ = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image).max() < 9e-3
def a_ ( self) -> Optional[int]:
snake_case_ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-inpaint/init_image.png')
snake_case_ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png')
snake_case_ = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'
'/yellow_cat_sitting_on_a_park_bench_fp16.npy')
snake_case_ = 'stabilityai/stable-diffusion-2-inpainting'
snake_case_ = StableDiffusionInpaintPipeline.from_pretrained(
lowerCAmelCase__, torch_dtype=torch.floataa, safety_checker=lowerCAmelCase__, )
pipe.to(lowerCAmelCase__)
pipe.set_progress_bar_config(disable=lowerCAmelCase__)
pipe.enable_attention_slicing()
snake_case_ = 'Face of a yellow cat, high resolution, sitting on a park bench'
snake_case_ = torch.manual_seed(0)
snake_case_ = pipe(
prompt=lowerCAmelCase__, image=lowerCAmelCase__, mask_image=lowerCAmelCase__, generator=lowerCAmelCase__, output_type='np', )
snake_case_ = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image).max() < 5e-1
def a_ ( self) -> Union[str, Any]:
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
snake_case_ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-inpaint/init_image.png')
snake_case_ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png')
snake_case_ = 'stabilityai/stable-diffusion-2-inpainting'
snake_case_ = PNDMScheduler.from_pretrained(lowerCAmelCase__, subfolder='scheduler')
snake_case_ = StableDiffusionInpaintPipeline.from_pretrained(
lowerCAmelCase__, safety_checker=lowerCAmelCase__, scheduler=lowerCAmelCase__, torch_dtype=torch.floataa, )
pipe.to(lowerCAmelCase__)
pipe.set_progress_bar_config(disable=lowerCAmelCase__)
pipe.enable_attention_slicing(1)
pipe.enable_sequential_cpu_offload()
snake_case_ = 'Face of a yellow cat, high resolution, sitting on a park bench'
snake_case_ = torch.manual_seed(0)
snake_case_ = pipe(
prompt=lowerCAmelCase__, image=lowerCAmelCase__, mask_image=lowerCAmelCase__, generator=lowerCAmelCase__, num_inference_steps=2, output_type='np', )
snake_case_ = torch.cuda.max_memory_allocated()
# make sure that less than 2.65 GB is allocated
assert mem_bytes < 2.65 * 10**9
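# Hedged note on the memory bound above: `enable_attention_slicing(1)` computes attention
# one slice at a time and `enable_sequential_cpu_offload()` keeps submodules on the CPU
# until they are needed, which together keep peak VRAM under the asserted ~2.65 GB.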
"""simple docstring"""
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
__UpperCamelCase = logging.getLogger(__name__)
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> Any:
return (preds == labels).mean()
@dataclass
class UpperCamelCase :
SCREAMING_SNAKE_CASE_ = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
SCREAMING_SNAKE_CASE_ = field(
default=lowerCAmelCase__ , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
SCREAMING_SNAKE_CASE_ = field(
default=lowerCAmelCase__ , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
SCREAMING_SNAKE_CASE_ = field(
default=lowerCAmelCase__ , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
@dataclass
class UpperCamelCase :
SCREAMING_SNAKE_CASE_ = field(metadata={"help": "The name of the task to train on: " + ", ".join(processors.keys() )} )
SCREAMING_SNAKE_CASE_ = field(metadata={"help": "Should contain the data files for the task."} )
SCREAMING_SNAKE_CASE_ = field(
default=1_2_8 , metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
SCREAMING_SNAKE_CASE_ = field(
default=lowerCAmelCase__ , metadata={"help": "Overwrite the cached training and evaluation sets"} )
def UpperCAmelCase ( ) -> Optional[int]:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
snake_case_ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
snake_case_ , snake_case_ , snake_case_ = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f'Output directory ({training_args.output_dir}) already exists and is not empty. Use'
' --overwrite_output_dir to overcome.' )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
        'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bit training: %s' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('Training/evaluation parameters %s' , UpperCAmelCase )
# Set seed
set_seed(training_args.seed )
try:
snake_case_ = processors[data_args.task_name]()
snake_case_ = processor.get_labels()
snake_case_ = len(UpperCAmelCase )
except KeyError:
raise ValueError('Task not found: %s' % (data_args.task_name) )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
snake_case_ = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=UpperCAmelCase , finetuning_task=data_args.task_name , cache_dir=model_args.cache_dir , )
snake_case_ = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
snake_case_ = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=UpperCAmelCase , cache_dir=model_args.cache_dir , )
# Get datasets
snake_case_ = (
MultipleChoiceDataset(
data_dir=data_args.data_dir , tokenizer=UpperCAmelCase , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
if training_args.do_train
else None
)
snake_case_ = (
MultipleChoiceDataset(
data_dir=data_args.data_dir , tokenizer=UpperCAmelCase , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
if training_args.do_eval
else None
)
def compute_metrics(UpperCAmelCase ) -> Dict:
snake_case_ = np.argmax(p.predictions , axis=1 )
return {"acc": simple_accuracy(UpperCAmelCase , p.label_ids )}
# Data collator
snake_case_ = DataCollatorWithPadding(UpperCAmelCase , pad_to_multiple_of=8 ) if training_args.fpaa else None
# Initialize our Trainer
snake_case_ = Trainer(
model=UpperCAmelCase , args=UpperCAmelCase , train_dataset=UpperCAmelCase , eval_dataset=UpperCAmelCase , compute_metrics=UpperCAmelCase , data_collator=UpperCAmelCase , )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
snake_case_ = {}
if training_args.do_eval:
logger.info('*** Evaluate ***' )
snake_case_ = trainer.evaluate()
snake_case_ = os.path.join(training_args.output_dir , 'eval_results.txt' )
if trainer.is_world_master():
with open(UpperCAmelCase , 'w' ) as writer:
logger.info('***** Eval results *****' )
for key, value in result.items():
logger.info(' %s = %s' , UpperCAmelCase , UpperCAmelCase )
writer.write('%s = %s\n' % (key, value) )
results.update(UpperCAmelCase )
return results
def UpperCAmelCase ( UpperCAmelCase ) -> Optional[int]:
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 360
|
"""simple docstring"""
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
__UpperCamelCase = logging.getLogger(__name__)
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> Any:
return (preds == labels).mean()
@dataclass
class UpperCamelCase :
SCREAMING_SNAKE_CASE_ = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
SCREAMING_SNAKE_CASE_ = field(
default=lowerCAmelCase__ , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
SCREAMING_SNAKE_CASE_ = field(
default=lowerCAmelCase__ , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
SCREAMING_SNAKE_CASE_ = field(
default=lowerCAmelCase__ , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
@dataclass
class UpperCamelCase :
SCREAMING_SNAKE_CASE_ = field(metadata={"help": "The name of the task to train on: " + ", ".join(processors.keys() )} )
SCREAMING_SNAKE_CASE_ = field(metadata={"help": "Should contain the data files for the task."} )
SCREAMING_SNAKE_CASE_ = field(
default=1_2_8 , metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
SCREAMING_SNAKE_CASE_ = field(
default=lowerCAmelCase__ , metadata={"help": "Overwrite the cached training and evaluation sets"} )
def UpperCAmelCase ( ) -> Optional[int]:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
snake_case_ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
snake_case_ , snake_case_ , snake_case_ = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f'Output directory ({training_args.output_dir}) already exists and is not empty. Use'
' --overwrite_output_dir to overcome.' )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('Training/evaluation parameters %s' , UpperCAmelCase )
# Set seed
set_seed(training_args.seed )
try:
snake_case_ = processors[data_args.task_name]()
snake_case_ = processor.get_labels()
snake_case_ = len(UpperCAmelCase )
except KeyError:
raise ValueError('Task not found: %s' % (data_args.task_name) )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
snake_case_ = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=UpperCAmelCase , finetuning_task=data_args.task_name , cache_dir=model_args.cache_dir , )
snake_case_ = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
snake_case_ = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=UpperCAmelCase , cache_dir=model_args.cache_dir , )
# Get datasets
snake_case_ = (
MultipleChoiceDataset(
data_dir=data_args.data_dir , tokenizer=UpperCAmelCase , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
if training_args.do_train
else None
)
snake_case_ = (
MultipleChoiceDataset(
data_dir=data_args.data_dir , tokenizer=UpperCAmelCase , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
if training_args.do_eval
else None
)
def compute_metrics(UpperCAmelCase ) -> Dict:
snake_case_ = np.argmax(p.predictions , axis=1 )
return {"acc": simple_accuracy(UpperCAmelCase , p.label_ids )}
# Data collator
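    # Padding to a multiple of 8 keeps fp16 tensor-core kernels efficient
    # (here `fpaa` appears to be the obfuscated fp16 flag).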
snake_case_ = DataCollatorWithPadding(UpperCAmelCase , pad_to_multiple_of=8 ) if training_args.fpaa else None
# Initialize our Trainer
snake_case_ = Trainer(
model=UpperCAmelCase , args=UpperCAmelCase , train_dataset=UpperCAmelCase , eval_dataset=UpperCAmelCase , compute_metrics=UpperCAmelCase , data_collator=UpperCAmelCase , )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
snake_case_ = {}
if training_args.do_eval:
logger.info('*** Evaluate ***' )
snake_case_ = trainer.evaluate()
snake_case_ = os.path.join(training_args.output_dir , 'eval_results.txt' )
if trainer.is_world_master():
with open(UpperCAmelCase , 'w' ) as writer:
logger.info('***** Eval results *****' )
for key, value in result.items():
logger.info(' %s = %s' , UpperCAmelCase , UpperCAmelCase )
writer.write('%s = %s\n' % (key, value) )
results.update(UpperCAmelCase )
return results
def UpperCAmelCase ( UpperCAmelCase ) -> Optional[int]:
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 312
| 0
|
"""simple docstring"""
import cva
import numpy as np
class UpperCamelCase :
def __init__( self, lowerCAmelCase__, lowerCAmelCase__) -> Dict:
if k in (0.04, 0.06):
snake_case_ = k
snake_case_ = window_size
else:
raise ValueError('invalid k value')
def __str__( self) -> str:
return str(self.k)
def a_ ( self, lowerCAmelCase__) -> tuple[cva.Mat, list[list[int]]]:
snake_case_ = cva.imread(lowerCAmelCase__, 0)
snake_case_ , snake_case_ = img.shape
snake_case_ = []
snake_case_ = img.copy()
snake_case_ = cva.cvtColor(lowerCAmelCase__, cva.COLOR_GRAY2RGB)
snake_case_ , snake_case_ = np.gradient(lowerCAmelCase__)
snake_case_ = dx**2
snake_case_ = dy**2
snake_case_ = dx * dy
snake_case_ = 0.04
snake_case_ = self.window_size // 2
for y in range(lowerCAmelCase__, h - offset):
for x in range(lowerCAmelCase__, w - offset):
snake_case_ = ixx[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
snake_case_ = iyy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
snake_case_ = ixy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
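                # Harris response: R = det(M) - k * trace(M)^2, where
                # M = [[Wxx, Wxy], [Wxy, Wyy]] is the summed structure tensor.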
snake_case_ = (wxx * wyy) - (wxy**2)
snake_case_ = wxx + wyy
snake_case_ = det - k * (trace**2)
# Can change the value
if r > 0.5:
corner_list.append([x, y, r])
color_img.itemset((y, x, 0), 0)
color_img.itemset((y, x, 1), 0)
color_img.itemset((y, x, 2), 255)
return color_img, corner_list
if __name__ == "__main__":
__UpperCamelCase = HarrisCorner(0.04, 3)
__UpperCamelCase , __UpperCamelCase = edge_detect.detect('''path_to_image''')
cva.imwrite('''detect.png''', color_img)
| 361
|
"""simple docstring"""
from __future__ import annotations
import math
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> int:
if depth < 0:
raise ValueError('Depth cannot be less than 0' )
if len(UpperCAmelCase ) == 0:
raise ValueError('Scores cannot be empty' )
if depth == height:
return scores[node_index]
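    # The game tree is implicit in `scores`: the children of node_index
    # sit at node_index * 2 and node_index * 2 + 1.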
if is_max:
return max(
minimax(depth + 1 , node_index * 2 , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) , minimax(depth + 1 , node_index * 2 + 1 , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) , )
return min(
minimax(depth + 1 , node_index * 2 , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) , minimax(depth + 1 , node_index * 2 + 1 , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) , )
def UpperCAmelCase ( ) -> None:
snake_case_ = [90, 23, 6, 33, 21, 65, 123, 34423]
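    # Height of the implicit complete binary tree: log2 of the leaf count.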
snake_case_ = math.log(len(UpperCAmelCase ) , 2 )
print('Optimal value : ' , end='' )
print(minimax(0 , 0 , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 312
| 0
|
import os
from glob import glob
import imageio
import torch
import torchvision
import wandb
from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan
from loaders import load_vqgan
from PIL import Image
from torch import nn
from transformers import CLIPModel, CLIPTokenizerFast
from utils import get_device, get_timestamp, show_pil
class UpperCamelCase :
def __init__( self, lowerCAmelCase__ = "cpu", lowerCAmelCase__ = "openai/clip-vit-large-patch14") -> None:
snake_case_ = device
snake_case_ = CLIPTokenizerFast.from_pretrained(lowerCAmelCase__)
snake_case_ = [0.48145466, 0.4578275, 0.40821073]
snake_case_ = [0.26862954, 0.26130258, 0.27577711]
snake_case_ = torchvision.transforms.Normalize(self.image_mean, self.image_std)
snake_case_ = torchvision.transforms.Resize(224)
snake_case_ = torchvision.transforms.CenterCrop(224)
def a_ ( self, lowerCAmelCase__) -> List[Any]:
snake_case_ = self.resize(lowerCAmelCase__)
snake_case_ = self.center_crop(lowerCAmelCase__)
snake_case_ = self.normalize(lowerCAmelCase__)
return images
def __call__( self, lowerCAmelCase__=None, lowerCAmelCase__=None, **lowerCAmelCase__) -> Dict:
snake_case_ = self.tokenizer(text=lowerCAmelCase__, **lowerCAmelCase__)
snake_case_ = self.preprocess_img(lowerCAmelCase__)
snake_case_ = {key: value.to(self.device) for (key, value) in encoding.items()}
return encoding
class UpperCamelCase ( nn.Module ):
def __init__( self, lowerCAmelCase__=10, lowerCAmelCase__=0.01, lowerCAmelCase__=None, lowerCAmelCase__=None, lowerCAmelCase__=None, lowerCAmelCase__=None, lowerCAmelCase__=None, lowerCAmelCase__=None, lowerCAmelCase__=False, lowerCAmelCase__=True, lowerCAmelCase__="image", lowerCAmelCase__=True, lowerCAmelCase__=False, lowerCAmelCase__=False, lowerCAmelCase__=False, ) -> None:
super().__init__()
snake_case_ = None
snake_case_ = device if device else get_device()
if vqgan:
snake_case_ = vqgan
else:
snake_case_ = load_vqgan(self.device, conf_path=lowerCAmelCase__, ckpt_path=lowerCAmelCase__)
self.vqgan.eval()
if clip:
snake_case_ = clip
else:
snake_case_ = CLIPModel.from_pretrained('openai/clip-vit-base-patch32')
self.clip.to(self.device)
snake_case_ = ProcessorGradientFlow(device=self.device)
snake_case_ = iterations
snake_case_ = lr
snake_case_ = log
snake_case_ = make_grid
snake_case_ = return_val
snake_case_ = quantize
snake_case_ = self.vqgan.decoder.z_shape
def a_ ( self, lowerCAmelCase__=None, lowerCAmelCase__=None, lowerCAmelCase__=5, lowerCAmelCase__=True) -> List[str]:
snake_case_ = []
if output_path is None:
snake_case_ = './animation.gif'
if input_path is None:
snake_case_ = self.save_path
snake_case_ = sorted(glob(input_path + '/*'))
if not len(lowerCAmelCase__):
raise ValueError(
'No images found in save path, aborting (did you pass save_intermediate=True to the generate'
' function?)')
if len(lowerCAmelCase__) == 1:
            print('Only one image found in save path (did you pass save_intermediate=True to the generate function?)')
snake_case_ = total_duration / len(lowerCAmelCase__)
snake_case_ = [frame_duration] * len(lowerCAmelCase__)
if extend_frames:
snake_case_ = 1.5
snake_case_ = 3
for file_name in paths:
if file_name.endswith('.png'):
images.append(imageio.imread(lowerCAmelCase__))
imageio.mimsave(lowerCAmelCase__, lowerCAmelCase__, duration=lowerCAmelCase__)
print(f'gif saved to {output_path}')
def a_ ( self, lowerCAmelCase__=None, lowerCAmelCase__=None) -> List[str]:
if not (path or img):
raise ValueError('Input either path or tensor')
if img is not None:
raise NotImplementedError
snake_case_ = preprocess(Image.open(lowerCAmelCase__), target_image_size=256).to(self.device)
snake_case_ = preprocess_vqgan(lowerCAmelCase__)
snake_case_ , *snake_case_ = self.vqgan.encode(lowerCAmelCase__)
return z
def a_ ( self, lowerCAmelCase__) -> Union[str, Any]:
snake_case_ = self.latent.detach().requires_grad_()
snake_case_ = base_latent + transform_vector
if self.quantize:
snake_case_ , *snake_case_ = self.vqgan.quantize(lowerCAmelCase__)
else:
snake_case_ = trans_latent
return self.vqgan.decode(lowerCAmelCase__)
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__=None) -> Optional[int]:
snake_case_ = self.clip_preprocessor(text=lowerCAmelCase__, images=lowerCAmelCase__, return_tensors='pt', padding=lowerCAmelCase__)
snake_case_ = self.clip(**lowerCAmelCase__)
snake_case_ = clip_outputs.logits_per_image
if weights is not None:
snake_case_ = similarity_logits * weights
return similarity_logits.sum()
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__) -> List[str]:
snake_case_ = self._get_clip_similarity(pos_prompts['prompts'], lowerCAmelCase__, weights=(1 / pos_prompts['weights']))
if neg_prompts:
snake_case_ = self._get_clip_similarity(neg_prompts['prompts'], lowerCAmelCase__, weights=neg_prompts['weights'])
else:
snake_case_ = torch.tensor([1], device=self.device)
snake_case_ = -torch.log(lowerCAmelCase__) + torch.log(lowerCAmelCase__)
return loss
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__) -> Optional[Any]:
snake_case_ = torch.randn_like(self.latent, requires_grad=lowerCAmelCase__, device=self.device)
snake_case_ = torch.optim.Adam([vector], lr=self.lr)
for i in range(self.iterations):
optim.zero_grad()
snake_case_ = self._add_vector(lowerCAmelCase__)
snake_case_ = loop_post_process(lowerCAmelCase__)
snake_case_ = self._get_CLIP_loss(lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__)
print('CLIP loss', lowerCAmelCase__)
if self.log:
wandb.log({'CLIP Loss': clip_loss})
clip_loss.backward(retain_graph=lowerCAmelCase__)
optim.step()
if self.return_val == "image":
yield custom_to_pil(transformed_img[0])
else:
yield vector
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__) -> Optional[int]:
wandb.init(reinit=lowerCAmelCase__, project='face-editor')
wandb.config.update({'Positive Prompts': positive_prompts})
wandb.config.update({'Negative Prompts': negative_prompts})
wandb.config.update({'lr': self.lr, 'iterations': self.iterations})
if image_path:
snake_case_ = Image.open(lowerCAmelCase__)
snake_case_ = image.resize((256, 256))
wandb.log('Original Image', wandb.Image(lowerCAmelCase__))
def a_ ( self, lowerCAmelCase__) -> Union[str, Any]:
if not prompts:
return []
snake_case_ = []
snake_case_ = []
if isinstance(lowerCAmelCase__, lowerCAmelCase__):
snake_case_ = [prompt.strip() for prompt in prompts.split('|')]
for prompt in prompts:
if isinstance(lowerCAmelCase__, (tuple, list)):
snake_case_ = prompt[0]
snake_case_ = float(prompt[1])
elif ":" in prompt:
snake_case_ , snake_case_ = prompt.split(':')
snake_case_ = float(lowerCAmelCase__)
else:
snake_case_ = prompt
snake_case_ = 1.0
processed_prompts.append(lowerCAmelCase__)
weights.append(lowerCAmelCase__)
return {
"prompts": processed_prompts,
"weights": torch.tensor(lowerCAmelCase__, device=self.device),
}
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__=None, lowerCAmelCase__=None, lowerCAmelCase__=True, lowerCAmelCase__=False, lowerCAmelCase__=True, lowerCAmelCase__=True, lowerCAmelCase__=None, ) -> Any:
if image_path:
snake_case_ = self._get_latent(lowerCAmelCase__)
else:
snake_case_ = torch.randn(self.latent_dim, device=self.device)
if self.log:
self._init_logging(lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__)
assert pos_prompts, "You must provide at least one positive prompt."
snake_case_ = self.process_prompts(lowerCAmelCase__)
snake_case_ = self.process_prompts(lowerCAmelCase__)
if save_final and save_path is None:
snake_case_ = os.path.join('./outputs/', '_'.join(pos_prompts['prompts']))
if not os.path.exists(lowerCAmelCase__):
os.makedirs(lowerCAmelCase__)
else:
snake_case_ = save_path + '_' + get_timestamp()
os.makedirs(lowerCAmelCase__)
snake_case_ = save_path
snake_case_ = self.vqgan.decode(self.latent)[0]
if show_intermediate:
print('Original Image')
show_pil(custom_to_pil(lowerCAmelCase__))
snake_case_ = loop_post_process(lowerCAmelCase__)
for iter, transformed_img in enumerate(self._optimize_CLIP(lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__)):
if show_intermediate:
show_pil(lowerCAmelCase__)
if save_intermediate:
transformed_img.save(os.path.join(self.save_path, f'iter_{iter:03d}.png'))
if self.log:
wandb.log({'Image': wandb.Image(lowerCAmelCase__)})
if show_final:
show_pil(lowerCAmelCase__)
if save_final:
transformed_img.save(os.path.join(self.save_path, f'iter_{iter:03d}_final.png'))
| 362
|
"""simple docstring"""
import argparse
import json
import torch
from diffusers import DDPMScheduler, LDMPipeline, UNetaDModel, VQModel
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase=1 ) -> Optional[Any]:
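    # Drop the first n_shave_prefix_segments dot-separated segments of a
    # parameter path; for a negative count, drop the trailing segments instead.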
if n_shave_prefix_segments >= 0:
return ".".join(path.split('.' )[n_shave_prefix_segments:] )
else:
return ".".join(path.split('.' )[:n_shave_prefix_segments] )
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase=0 ) -> Dict:
snake_case_ = []
for old_item in old_list:
snake_case_ = old_item.replace('in_layers.0' , 'norm1' )
snake_case_ = new_item.replace('in_layers.2' , 'conv1' )
snake_case_ = new_item.replace('out_layers.0' , 'norm2' )
snake_case_ = new_item.replace('out_layers.3' , 'conv2' )
snake_case_ = new_item.replace('emb_layers.1' , 'time_emb_proj' )
snake_case_ = new_item.replace('skip_connection' , 'conv_shortcut' )
snake_case_ = shave_segments(UpperCAmelCase , n_shave_prefix_segments=UpperCAmelCase )
mapping.append({'old': old_item, 'new': new_item} )
return mapping
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase=0 ) -> Union[str, Any]:
snake_case_ = []
for old_item in old_list:
snake_case_ = old_item
snake_case_ = new_item.replace('norm.weight' , 'group_norm.weight' )
snake_case_ = new_item.replace('norm.bias' , 'group_norm.bias' )
snake_case_ = new_item.replace('proj_out.weight' , 'proj_attn.weight' )
snake_case_ = new_item.replace('proj_out.bias' , 'proj_attn.bias' )
snake_case_ = shave_segments(UpperCAmelCase , n_shave_prefix_segments=UpperCAmelCase )
mapping.append({'old': old_item, 'new': new_item} )
return mapping
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=None ) -> Optional[Any]:
assert isinstance(UpperCAmelCase , UpperCAmelCase ), "Paths should be a list of dicts containing 'old' and 'new' keys."
# Splits the attention layers into three variables.
if attention_paths_to_split is not None:
for path, path_map in attention_paths_to_split.items():
snake_case_ = old_checkpoint[path]
snake_case_ = old_tensor.shape[0] // 3
snake_case_ = (-1, channels) if len(old_tensor.shape ) == 3 else (-1)
snake_case_ = old_tensor.shape[0] // config['num_head_channels'] // 3
snake_case_ = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:] )
snake_case_ , snake_case_ , snake_case_ = old_tensor.split(channels // num_heads , dim=1 )
snake_case_ = query.reshape(UpperCAmelCase )
snake_case_ = key.reshape(UpperCAmelCase )
snake_case_ = value.reshape(UpperCAmelCase )
for path in paths:
snake_case_ = path['new']
# These have already been assigned
if attention_paths_to_split is not None and new_path in attention_paths_to_split:
continue
# Global renaming happens here
snake_case_ = new_path.replace('middle_block.0' , 'mid_block.resnets.0' )
snake_case_ = new_path.replace('middle_block.1' , 'mid_block.attentions.0' )
snake_case_ = new_path.replace('middle_block.2' , 'mid_block.resnets.1' )
if additional_replacements is not None:
for replacement in additional_replacements:
snake_case_ = new_path.replace(replacement['old'] , replacement['new'] )
# proj_attn.weight has to be converted from conv 1D to linear
if "proj_attn.weight" in new_path:
snake_case_ = old_checkpoint[path['old']][:, :, 0]
else:
snake_case_ = old_checkpoint[path['old']]
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> List[Any]:
snake_case_ = {}
snake_case_ = checkpoint['time_embed.0.weight']
snake_case_ = checkpoint['time_embed.0.bias']
snake_case_ = checkpoint['time_embed.2.weight']
snake_case_ = checkpoint['time_embed.2.bias']
snake_case_ = checkpoint['input_blocks.0.0.weight']
snake_case_ = checkpoint['input_blocks.0.0.bias']
snake_case_ = checkpoint['out.0.weight']
snake_case_ = checkpoint['out.0.bias']
snake_case_ = checkpoint['out.2.weight']
snake_case_ = checkpoint['out.2.bias']
# Retrieves the keys for the input blocks only
snake_case_ = len({'.'.join(layer.split('.' )[:2] ) for layer in checkpoint if 'input_blocks' in layer} )
snake_case_ = {
layer_id: [key for key in checkpoint if f'input_blocks.{layer_id}' in key]
for layer_id in range(UpperCAmelCase )
}
# Retrieves the keys for the middle blocks only
snake_case_ = len({'.'.join(layer.split('.' )[:2] ) for layer in checkpoint if 'middle_block' in layer} )
snake_case_ = {
layer_id: [key for key in checkpoint if f'middle_block.{layer_id}' in key]
for layer_id in range(UpperCAmelCase )
}
# Retrieves the keys for the output blocks only
snake_case_ = len({'.'.join(layer.split('.' )[:2] ) for layer in checkpoint if 'output_blocks' in layer} )
snake_case_ = {
layer_id: [key for key in checkpoint if f'output_blocks.{layer_id}' in key]
for layer_id in range(UpperCAmelCase )
}
for i in range(1 , UpperCAmelCase ):
snake_case_ = (i - 1) // (config['num_res_blocks'] + 1)
snake_case_ = (i - 1) % (config['num_res_blocks'] + 1)
snake_case_ = [key for key in input_blocks[i] if f'input_blocks.{i}.0' in key]
snake_case_ = [key for key in input_blocks[i] if f'input_blocks.{i}.1' in key]
if f'input_blocks.{i}.0.op.weight' in checkpoint:
snake_case_ = checkpoint[
f'input_blocks.{i}.0.op.weight'
]
snake_case_ = checkpoint[
f'input_blocks.{i}.0.op.bias'
]
continue
snake_case_ = renew_resnet_paths(UpperCAmelCase )
snake_case_ = {'old': f'input_blocks.{i}.0', 'new': f'down_blocks.{block_id}.resnets.{layer_in_block_id}'}
snake_case_ = {'old': 'resnets.2.op', 'new': 'downsamplers.0.op'}
assign_to_checkpoint(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , additional_replacements=[meta_path, resnet_op] , config=UpperCAmelCase )
if len(UpperCAmelCase ):
snake_case_ = renew_attention_paths(UpperCAmelCase )
snake_case_ = {
'old': f'input_blocks.{i}.1',
'new': f'down_blocks.{block_id}.attentions.{layer_in_block_id}',
}
snake_case_ = {
f'input_blocks.{i}.1.qkv.bias': {
'key': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias',
'query': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias',
'value': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias',
},
f'input_blocks.{i}.1.qkv.weight': {
'key': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight',
'query': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight',
'value': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight',
},
}
assign_to_checkpoint(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , additional_replacements=[meta_path] , attention_paths_to_split=UpperCAmelCase , config=UpperCAmelCase , )
snake_case_ = middle_blocks[0]
snake_case_ = middle_blocks[1]
snake_case_ = middle_blocks[2]
snake_case_ = renew_resnet_paths(UpperCAmelCase )
assign_to_checkpoint(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , config=UpperCAmelCase )
snake_case_ = renew_resnet_paths(UpperCAmelCase )
assign_to_checkpoint(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , config=UpperCAmelCase )
snake_case_ = renew_attention_paths(UpperCAmelCase )
snake_case_ = {
'middle_block.1.qkv.bias': {
'key': 'mid_block.attentions.0.key.bias',
'query': 'mid_block.attentions.0.query.bias',
'value': 'mid_block.attentions.0.value.bias',
},
'middle_block.1.qkv.weight': {
'key': 'mid_block.attentions.0.key.weight',
'query': 'mid_block.attentions.0.query.weight',
'value': 'mid_block.attentions.0.value.weight',
},
}
assign_to_checkpoint(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , attention_paths_to_split=UpperCAmelCase , config=UpperCAmelCase )
for i in range(UpperCAmelCase ):
snake_case_ = i // (config['num_res_blocks'] + 1)
snake_case_ = i % (config['num_res_blocks'] + 1)
snake_case_ = [shave_segments(UpperCAmelCase , 2 ) for name in output_blocks[i]]
snake_case_ = {}
for layer in output_block_layers:
snake_case_ , snake_case_ = layer.split('.' )[0], shave_segments(UpperCAmelCase , 1 )
if layer_id in output_block_list:
output_block_list[layer_id].append(UpperCAmelCase )
else:
snake_case_ = [layer_name]
if len(UpperCAmelCase ) > 1:
snake_case_ = [key for key in output_blocks[i] if f'output_blocks.{i}.0' in key]
snake_case_ = [key for key in output_blocks[i] if f'output_blocks.{i}.1' in key]
snake_case_ = renew_resnet_paths(UpperCAmelCase )
snake_case_ = renew_resnet_paths(UpperCAmelCase )
snake_case_ = {'old': f'output_blocks.{i}.0', 'new': f'up_blocks.{block_id}.resnets.{layer_in_block_id}'}
assign_to_checkpoint(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , additional_replacements=[meta_path] , config=UpperCAmelCase )
if ["conv.weight", "conv.bias"] in output_block_list.values():
snake_case_ = list(output_block_list.values() ).index(['conv.weight', 'conv.bias'] )
snake_case_ = checkpoint[
f'output_blocks.{i}.{index}.conv.weight'
]
snake_case_ = checkpoint[
f'output_blocks.{i}.{index}.conv.bias'
]
# Clear attentions as they have been attributed above.
if len(UpperCAmelCase ) == 2:
snake_case_ = []
if len(UpperCAmelCase ):
snake_case_ = renew_attention_paths(UpperCAmelCase )
snake_case_ = {
'old': f'output_blocks.{i}.1',
'new': f'up_blocks.{block_id}.attentions.{layer_in_block_id}',
}
snake_case_ = {
f'output_blocks.{i}.1.qkv.bias': {
'key': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias',
'query': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias',
'value': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias',
},
f'output_blocks.{i}.1.qkv.weight': {
'key': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight',
'query': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight',
'value': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight',
},
}
assign_to_checkpoint(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , additional_replacements=[meta_path] , attention_paths_to_split=to_split if any('qkv' in key for key in attentions ) else None , config=UpperCAmelCase , )
else:
snake_case_ = renew_resnet_paths(UpperCAmelCase , n_shave_prefix_segments=1 )
for path in resnet_0_paths:
snake_case_ = '.'.join(['output_blocks', str(UpperCAmelCase ), path['old']] )
snake_case_ = '.'.join(['up_blocks', str(UpperCAmelCase ), 'resnets', str(UpperCAmelCase ), path['new']] )
snake_case_ = checkpoint[old_path]
return new_checkpoint
if __name__ == "__main__":
__UpperCamelCase = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the architecture.''',
)
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
__UpperCamelCase = parser.parse_args()
__UpperCamelCase = torch.load(args.checkpoint_path)
with open(args.config_file) as f:
__UpperCamelCase = json.loads(f.read())
__UpperCamelCase = convert_ldm_checkpoint(checkpoint, config)
if "ldm" in config:
del config["ldm"]
__UpperCamelCase = UNetaDModel(**config)
model.load_state_dict(converted_checkpoint)
try:
__UpperCamelCase = DDPMScheduler.from_config('''/'''.join(args.checkpoint_path.split('''/''')[:-1]))
__UpperCamelCase = VQModel.from_pretrained('''/'''.join(args.checkpoint_path.split('''/''')[:-1]))
__UpperCamelCase = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae)
pipe.save_pretrained(args.dump_path)
except: # noqa: E722
model.save_pretrained(args.dump_path)
| 312
| 0
|
import argparse
import torch
from torch import nn
from transformers import MaMaaaConfig, MaMaaaForConditionalGeneration
def UpperCAmelCase ( UpperCAmelCase ) -> List[Any]:
snake_case_ = [
'encoder.version',
'decoder.version',
'model.encoder.version',
'model.decoder.version',
'decoder.output_projection.weight',
'_float_tensor',
'encoder.embed_positions._float_tensor',
'decoder.embed_positions._float_tensor',
]
for k in ignore_keys:
state_dict.pop(UpperCAmelCase , UpperCAmelCase )
def UpperCAmelCase ( UpperCAmelCase ) -> List[str]:
snake_case_ , snake_case_ = emb.weight.shape
snake_case_ = nn.Linear(UpperCAmelCase , UpperCAmelCase , bias=UpperCAmelCase )
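    # Tie the new output projection to the embedding matrix (weight sharing).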
snake_case_ = emb.weight.data
return lin_layer
def UpperCAmelCase ( UpperCAmelCase ) -> Optional[Any]:
snake_case_ = torch.load(UpperCAmelCase , map_location='cpu' )
snake_case_ = mam_aaa['args'] or mam_aaa['cfg']['model']
snake_case_ = mam_aaa['model']
remove_ignore_keys_(UpperCAmelCase )
snake_case_ = state_dict['encoder.embed_tokens.weight'].shape[0]
snake_case_ = MaMaaaConfig(
vocab_size=UpperCAmelCase , max_position_embeddings=1024 , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , encoder_layerdrop=args.encoder_layerdrop , decoder_layerdrop=args.decoder_layerdrop , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function='relu' , )
snake_case_ = state_dict['decoder.embed_tokens.weight']
snake_case_ = MaMaaaForConditionalGeneration(UpperCAmelCase )
model.model.load_state_dict(UpperCAmelCase , strict=UpperCAmelCase )
snake_case_ = make_linear_from_emb(model.model.shared )
return model
if __name__ == "__main__":
__UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''fairseq_path''', type=str, help='''path to a model.pt on local filesystem.''')
parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
__UpperCamelCase = parser.parse_args()
__UpperCamelCase = convert_fairseq_mamaaa_checkpoint_from_disk(args.fairseq_path)
model.save_pretrained(args.pytorch_dump_folder_path)
| 363
|
"""simple docstring"""
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
def UpperCAmelCase ( UpperCAmelCase ) -> Dict:
# vision encoder
if "img_encoder.pos_embed" in name:
snake_case_ = name.replace('img_encoder.pos_embed' , 'vision_model.embeddings.position_embeddings' )
if "img_encoder.patch_embed.proj" in name:
snake_case_ = name.replace('img_encoder.patch_embed.proj' , 'vision_model.embeddings.patch_embeddings.projection' )
if "img_encoder.patch_embed.norm" in name:
snake_case_ = name.replace('img_encoder.patch_embed.norm' , 'vision_model.embeddings.layernorm' )
if "img_encoder.layers" in name:
snake_case_ = name.replace('img_encoder.layers' , 'vision_model.encoder.stages' )
if "blocks" in name and "res" not in name:
snake_case_ = name.replace('blocks' , 'layers' )
if "attn" in name and "pre_assign" not in name:
snake_case_ = name.replace('attn' , 'self_attn' )
if "proj" in name and "self_attn" in name and "text" not in name:
snake_case_ = name.replace('proj' , 'out_proj' )
if "pre_assign_attn.attn.proj" in name:
snake_case_ = name.replace('pre_assign_attn.attn.proj' , 'pre_assign_attn.attn.out_proj' )
if "norm1" in name:
snake_case_ = name.replace('norm1' , 'layer_norm1' )
if "norm2" in name and "pre_assign" not in name:
snake_case_ = name.replace('norm2' , 'layer_norm2' )
if "img_encoder.norm" in name:
snake_case_ = name.replace('img_encoder.norm' , 'vision_model.layernorm' )
# text encoder
if "text_encoder.token_embedding" in name:
snake_case_ = name.replace('text_encoder.token_embedding' , 'text_model.embeddings.token_embedding' )
if "text_encoder.positional_embedding" in name:
snake_case_ = name.replace('text_encoder.positional_embedding' , 'text_model.embeddings.position_embedding.weight' )
if "text_encoder.transformer.resblocks." in name:
snake_case_ = name.replace('text_encoder.transformer.resblocks.' , 'text_model.encoder.layers.' )
if "ln_1" in name:
snake_case_ = name.replace('ln_1' , 'layer_norm1' )
if "ln_2" in name:
snake_case_ = name.replace('ln_2' , 'layer_norm2' )
if "c_fc" in name:
snake_case_ = name.replace('c_fc' , 'fc1' )
if "c_proj" in name:
snake_case_ = name.replace('c_proj' , 'fc2' )
if "text_encoder" in name:
snake_case_ = name.replace('text_encoder' , 'text_model' )
if "ln_final" in name:
snake_case_ = name.replace('ln_final' , 'final_layer_norm' )
# projection layers
if "img_projector.linear_hidden." in name:
snake_case_ = name.replace('img_projector.linear_hidden.' , 'visual_projection.' )
if "img_projector.linear_out." in name:
snake_case_ = name.replace('img_projector.linear_out.' , 'visual_projection.3.' )
if "text_projector.linear_hidden" in name:
snake_case_ = name.replace('text_projector.linear_hidden' , 'text_projection' )
if "text_projector.linear_out" in name:
snake_case_ = name.replace('text_projector.linear_out' , 'text_projection.3' )
return name
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> Optional[Any]:
for key in orig_state_dict.copy().keys():
snake_case_ = orig_state_dict.pop(UpperCAmelCase )
if "qkv" in key:
# weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
snake_case_ = key.split('.' )
snake_case_ , snake_case_ = int(key_split[2] ), int(key_split[4] )
snake_case_ = config.vision_config.hidden_size
if "weight" in key:
snake_case_ = val[:dim, :]
snake_case_ = val[dim : dim * 2, :]
snake_case_ = val[-dim:, :]
else:
snake_case_ = val[:dim]
snake_case_ = val[dim : dim * 2]
snake_case_ = val[-dim:]
elif "in_proj" in key:
# weights and biases of the key, value and query projections of text encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
snake_case_ = key.split('.' )
snake_case_ = int(key_split[3] )
snake_case_ = config.text_config.hidden_size
if "weight" in key:
snake_case_ = val[:dim, :]
snake_case_ = val[
dim : dim * 2, :
]
snake_case_ = val[-dim:, :]
else:
snake_case_ = val[:dim]
snake_case_ = val[dim : dim * 2]
snake_case_ = val[-dim:]
else:
snake_case_ = rename_key(UpperCAmelCase )
# squeeze if necessary
if (
"text_projection.0" in new_name
or "text_projection.3" in new_name
or "visual_projection.0" in new_name
or "visual_projection.3" in new_name
):
snake_case_ = val.squeeze_()
else:
snake_case_ = val
return orig_state_dict
def UpperCAmelCase ( ) -> Any:
snake_case_ = 'http://images.cocodataset.org/val2017/000000039769.jpg'
snake_case_ = Image.open(requests.get(UpperCAmelCase , stream=UpperCAmelCase ).raw )
return im
@torch.no_grad()
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase="groupvit-gcc-yfcc" , UpperCAmelCase=False ) -> int:
snake_case_ = GroupViTConfig()
snake_case_ = GroupViTModel(UpperCAmelCase ).eval()
snake_case_ = torch.load(UpperCAmelCase , map_location='cpu' )['model']
snake_case_ = convert_state_dict(UpperCAmelCase , UpperCAmelCase )
snake_case_ , snake_case_ = model.load_state_dict(UpperCAmelCase , strict=UpperCAmelCase )
assert missing_keys == ["text_model.embeddings.position_ids"]
assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(UpperCAmelCase ) == 0)
# verify result
snake_case_ = CLIPProcessor.from_pretrained('openai/clip-vit-base-patch32' )
snake_case_ = prepare_img()
snake_case_ = processor(text=['a photo of a cat', 'a photo of a dog'] , images=UpperCAmelCase , padding=UpperCAmelCase , return_tensors='pt' )
with torch.no_grad():
snake_case_ = model(**UpperCAmelCase )
if model_name == "groupvit-gcc-yfcc":
snake_case_ = torch.tensor([[13.3_523, 6.3_629]] )
elif model_name == "groupvit-gcc-redcaps":
snake_case_ = torch.tensor([[16.1_873, 8.6_230]] )
else:
raise ValueError(f'Model name {model_name} not supported.' )
assert torch.allclose(outputs.logits_per_image , UpperCAmelCase , atol=1e-3 )
processor.save_pretrained(UpperCAmelCase )
model.save_pretrained(UpperCAmelCase )
print('Successfully saved processor and model to' , UpperCAmelCase )
if push_to_hub:
print('Pushing to the hub...' )
processor.push_to_hub(UpperCAmelCase , organization='nielsr' )
model.push_to_hub(UpperCAmelCase , organization='nielsr' )
if __name__ == "__main__":
__UpperCamelCase = argparse.ArgumentParser()
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to dump the processor and PyTorch model.'''
)
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to GroupViT checkpoint''')
parser.add_argument(
'''--model_name''',
    default='''groupvit-gcc-yfcc''',
type=str,
help='''Name of the model. Expecting either \'groupvit-gcc-yfcc\' or \'groupvit-gcc-redcaps\'''',
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.''',
)
__UpperCamelCase = parser.parse_args()
convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 312
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
__UpperCamelCase = {
'''configuration_swiftformer''': [
'''SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''SwiftFormerConfig''',
'''SwiftFormerOnnxConfig''',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase = [
'''SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''SwiftFormerForImageClassification''',
'''SwiftFormerModel''',
'''SwiftFormerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
import sys
__UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 364
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__UpperCamelCase = {'''configuration_mmbt''': ['''MMBTConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase = ['''MMBTForClassification''', '''MMBTModel''', '''ModalEmbeddings''']
if TYPE_CHECKING:
from .configuration_mmbt import MMBTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings
else:
import sys
__UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 312
| 0
|
"""simple docstring"""
from typing import Dict
from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
get_torch_dist_unique_port,
require_torch_multi_gpu,
require_torch_neuroncore,
)
from transformers.training_args import ParallelMode
from transformers.utils import logging
__UpperCamelCase = logging.get_logger(__name__)
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
from transformers import Trainer
class UpperCamelCase ( lowerCAmelCase__ ):
def __init__( self, lowerCAmelCase__ = 101) -> int:
snake_case_ = length
def __len__( self) -> Optional[Any]:
return self.length
def __getitem__( self, lowerCAmelCase__) -> int:
return i
class UpperCamelCase :
def __call__( self, lowerCAmelCase__) -> Optional[Any]:
return {"input_ids": torch.tensor(lowerCAmelCase__), "labels": torch.tensor(lowerCAmelCase__)}
class UpperCamelCase ( nn.Module ):
def __init__( self) -> Optional[Any]:
super().__init__()
# Add some (unused) params otherwise DDP will complain.
snake_case_ = nn.Linear(120, 80)
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__=None) -> Tuple:
if labels is not None:
return torch.tensor(0.0, device=input_ids.device), input_ids
else:
return input_ids
class UpperCamelCase ( lowerCAmelCase__ ):
@require_torch_neuroncore
def a_ ( self) -> Any:
snake_case_ = f'--nproc_per_node=2\n --master_port={get_torch_dist_unique_port()}\n {self.test_file_dir}/test_trainer_distributed.py\n '.split()
snake_case_ = self.get_auto_remove_tmp_dir()
snake_case_ = f'--output_dir {output_dir}'.split()
snake_case_ = ['torchrun'] + distributed_args + args
execute_subprocess_async(lowerCAmelCase__, env=self.get_env())
# successful return here == success - any errors would have caused an error in the sub-call
class UpperCamelCase ( lowerCAmelCase__ ):
@require_torch_multi_gpu
def a_ ( self) -> Any:
snake_case_ = f'--nproc_per_node={torch.cuda.device_count()}\n --master_port={get_torch_dist_unique_port()}\n {self.test_file_dir}/test_trainer_distributed.py\n '.split()
snake_case_ = self.get_auto_remove_tmp_dir()
snake_case_ = f'--output_dir {output_dir}'.split()
snake_case_ = ['torchrun'] + distributed_args + args
execute_subprocess_async(lowerCAmelCase__, env=self.get_env())
# successful return here == success - any errors would have caused an error in the sub-call
if __name__ == "__main__":
# The script below is meant to be run under torch.distributed, on a machine with multiple GPUs:
#
# PYTHONPATH="src" python -m torch.distributed.run --nproc_per_node 2 --output_dir output_dir ./tests/test_trainer_distributed.py
__UpperCamelCase = HfArgumentParser((TrainingArguments,))
__UpperCamelCase = parser.parse_args_into_dataclasses()[0]
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, """
F"""distributed training: {training_args.parallel_mode != ParallelMode.NOT_DISTRIBUTED}"""
)
# Essentially, what we want to verify in the distributed case is that we get all samples back,
# in the right order. (this is crucial for prediction for instance)
for dataset_length in [101, 40, 7]:
__UpperCamelCase = DummyDataset(dataset_length)
def UpperCAmelCase ( UpperCAmelCase ) -> Dict:
snake_case_ = list(range(len(UpperCAmelCase ) ) )
snake_case_ = p.predictions.tolist() == sequential and p.label_ids.tolist() == sequential
if not success and training_args.local_rank == 0:
logger.warning(
'Predictions and/or labels do not match expected results:\n - predictions: '
f'{p.predictions.tolist()}\n - labels: {p.label_ids.tolist()}\n - expected: {sequential}' )
return {"success": success}
__UpperCamelCase = Trainer(
model=DummyModel(),
args=training_args,
data_collator=DummyDataCollator(),
eval_dataset=dataset,
compute_metrics=compute_metrics,
)
__UpperCamelCase = trainer.evaluate()
logger.info(metrics)
if metrics["eval_success"] is not True:
logger.error(metrics)
exit(1)
__UpperCamelCase = trainer.predict(dataset)
logger.info(p.metrics)
if p.metrics["test_success"] is not True:
logger.error(p.metrics)
exit(1)
__UpperCamelCase = 2
__UpperCamelCase = trainer.evaluate()
logger.info(metrics)
if metrics["eval_success"] is not True:
logger.error(metrics)
exit(1)
__UpperCamelCase = trainer.predict(dataset)
logger.info(p.metrics)
if p.metrics["test_success"] is not True:
logger.error(p.metrics)
exit(1)
__UpperCamelCase = None
| 365
|
"""simple docstring"""
from __future__ import annotations
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> list[str]:
if partitions <= 0:
raise ValueError('partitions must be a positive number!' )
if partitions > number_of_bytes:
raise ValueError('partitions can not > number_of_bytes!' )
snake_case_ = number_of_bytes // partitions
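    # Each partition spans bytes_per_partition bytes; the last one also
    # absorbs any remainder so the ranges end exactly at number_of_bytes.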
snake_case_ = []
for i in range(UpperCAmelCase ):
snake_case_ = i * bytes_per_partition + 1
snake_case_ = (
number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition
)
allocation_list.append(f'{start_bytes}-{end_bytes}' )
return allocation_list
if __name__ == "__main__":
import doctest
doctest.testmod()
| 312
| 0
|
"""simple docstring"""
import sys
import turtle
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> tuple[float, float]:
return (pa[0] + pa[0]) / 2, (pa[1] + pa[1]) / 2
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , ) -> None:
my_pen.up()
my_pen.goto(vertexa[0] , vertexa[1] )
my_pen.down()
my_pen.goto(vertexa[0] , vertexa[1] )
my_pen.goto(vertexa[0] , vertexa[1] )
my_pen.goto(vertexa[0] , vertexa[1] )
if depth == 0:
return
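    # Recurse into the three corner sub-triangles formed by the edge midpoints.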
triangle(UpperCAmelCase , get_mid(UpperCAmelCase , UpperCAmelCase ) , get_mid(UpperCAmelCase , UpperCAmelCase ) , depth - 1 )
triangle(UpperCAmelCase , get_mid(UpperCAmelCase , UpperCAmelCase ) , get_mid(UpperCAmelCase , UpperCAmelCase ) , depth - 1 )
triangle(UpperCAmelCase , get_mid(UpperCAmelCase , UpperCAmelCase ) , get_mid(UpperCAmelCase , UpperCAmelCase ) , depth - 1 )
if __name__ == "__main__":
if len(sys.argv) != 2:
raise ValueError(
'''Correct format for using this script: '''
'''python fractals.py <int:depth_for_fractal>'''
)
__UpperCamelCase = turtle.Turtle()
my_pen.ht()
my_pen.speed(5)
my_pen.pencolor('''red''')
__UpperCamelCase = [(-175, -125), (0, 175), (175, -125)] # vertices of triangle
triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
| 366
|
"""simple docstring"""
__UpperCamelCase = 256
# Modulus to hash a string
__UpperCamelCase = 100_0003
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> bool:
snake_case_ = len(UpperCAmelCase )
snake_case_ = len(UpperCAmelCase )
if p_len > t_len:
return False
snake_case_ = 0
snake_case_ = 0
snake_case_ = 1
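    # modulus_power accumulates alphabet_size**(p_len - 1) % modulus; it is
    # used below to drop the leading character from the rolling hash.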
# Calculating the hash of pattern and substring of text
for i in range(UpperCAmelCase ):
snake_case_ = (ord(pattern[i] ) + p_hash * alphabet_size) % modulus
snake_case_ = (ord(text[i] ) + text_hash * alphabet_size) % modulus
if i == p_len - 1:
continue
snake_case_ = (modulus_power * alphabet_size) % modulus
for i in range(0 , t_len - p_len + 1 ):
if text_hash == p_hash and text[i : i + p_len] == pattern:
return True
if i == t_len - p_len:
continue
# Calculate the https://en.wikipedia.org/wiki/Rolling_hash
snake_case_ = (
(text_hash - ord(text[i] ) * modulus_power) * alphabet_size
+ ord(text[i + p_len] )
) % modulus
return False
def UpperCAmelCase ( ) -> None:
snake_case_ = 'abc1abc12'
snake_case_ = 'alskfjaldsabc1abc1abc12k23adsfabcabc'
snake_case_ = 'alskfjaldsk23adsfabcabc'
assert rabin_karp(UpperCAmelCase , UpperCAmelCase ) and not rabin_karp(UpperCAmelCase , UpperCAmelCase )
# Test 2)
snake_case_ = 'ABABX'
snake_case_ = 'ABABZABABYABABX'
assert rabin_karp(UpperCAmelCase , UpperCAmelCase )
# Test 3)
snake_case_ = 'AAAB'
snake_case_ = 'ABAAAAAB'
assert rabin_karp(UpperCAmelCase , UpperCAmelCase )
# Test 4)
snake_case_ = 'abcdabcy'
snake_case_ = 'abcxabcdabxabcdabcdabcy'
assert rabin_karp(UpperCAmelCase , UpperCAmelCase )
# Test 5)
snake_case_ = 'Lü'
snake_case_ = 'Lüsai'
assert rabin_karp(UpperCAmelCase , UpperCAmelCase )
snake_case_ = 'Lue'
assert not rabin_karp(UpperCAmelCase , UpperCAmelCase )
print('Success.' )
if __name__ == "__main__":
test_rabin_karp()
| 312
| 0
|
"""simple docstring"""
import copy
import fnmatch
import json
import os
import pickle as pkl
import shutil
import sys
import tarfile
import tempfile
from collections import OrderedDict
from contextlib import contextmanager
from functools import partial
from hashlib import shaaaa
from io import BytesIO
from pathlib import Path
from urllib.parse import urlparse
from zipfile import ZipFile, is_zipfile
import cva
import numpy as np
import requests
import wget
from filelock import FileLock
from PIL import Image
from tqdm.auto import tqdm
from yaml import Loader, dump, load
try:
import torch
__UpperCamelCase = True
except ImportError:
__UpperCamelCase = False
try:
from torch.hub import _get_torch_home
__UpperCamelCase = _get_torch_home()
except ImportError:
__UpperCamelCase = os.path.expanduser(
os.getenv('''TORCH_HOME''', os.path.join(os.getenv('''XDG_CACHE_HOME''', '''~/.cache'''), '''torch'''))
)
__UpperCamelCase = os.path.join(torch_cache_home, '''transformers''')
__UpperCamelCase = '''https://cdn.huggingface.co'''
__UpperCamelCase = '''https://s3.amazonaws.com/models.huggingface.co/bert'''
__UpperCamelCase = '''/'''.join(str(Path(__file__).resolve()).split('''/''')[:-1])
__UpperCamelCase = os.path.join(PATH, '''config.yaml''')
__UpperCamelCase = os.path.join(PATH, '''attributes.txt''')
__UpperCamelCase = os.path.join(PATH, '''objects.txt''')
__UpperCamelCase = os.getenv('''PYTORCH_PRETRAINED_BERT_CACHE''', default_cache_path)
__UpperCamelCase = os.getenv('''PYTORCH_TRANSFORMERS_CACHE''', PYTORCH_PRETRAINED_BERT_CACHE)
__UpperCamelCase = os.getenv('''TRANSFORMERS_CACHE''', PYTORCH_TRANSFORMERS_CACHE)
__UpperCamelCase = '''pytorch_model.bin'''
__UpperCamelCase = '''config.yaml'''
def UpperCAmelCase ( UpperCAmelCase=OBJECTS , UpperCAmelCase=ATTRIBUTES ) -> int:
snake_case_ = []
with open(UpperCAmelCase ) as f:
for object in f.readlines():
vg_classes.append(object.split(',' )[0].lower().strip() )
snake_case_ = []
with open(UpperCAmelCase ) as f:
for object in f.readlines():
vg_attrs.append(object.split(',' )[0].lower().strip() )
return vg_classes, vg_attrs
def UpperCAmelCase ( UpperCAmelCase ) -> Any:
snake_case_ = OrderedDict()
with open(UpperCAmelCase , 'rb' ) as f:
snake_case_ = pkl.load(UpperCAmelCase )['model']
for k in copy.deepcopy(list(ckp.keys() ) ):
snake_case_ = ckp.pop(UpperCAmelCase )
if isinstance(UpperCAmelCase , np.ndarray ):
snake_case_ = torch.tensor(UpperCAmelCase )
else:
            assert isinstance(UpperCAmelCase , torch.Tensor ), type(UpperCAmelCase )
snake_case_ = v
return r
class UpperCamelCase :
SCREAMING_SNAKE_CASE_ = {}
def __init__( self, lowerCAmelCase__, lowerCAmelCase__ = "root", lowerCAmelCase__=0) -> str:
snake_case_ = name
snake_case_ = level
snake_case_ = {}
for k, v in dictionary.items():
if v is None:
raise ValueError()
snake_case_ = copy.deepcopy(lowerCAmelCase__)
snake_case_ = copy.deepcopy(lowerCAmelCase__)
if isinstance(lowerCAmelCase__, lowerCAmelCase__):
snake_case_ = Config(lowerCAmelCase__, name=lowerCAmelCase__, level=level + 1)
snake_case_ = v
setattr(self, lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = d
def __repr__( self) -> int:
return str(list((self._pointer.keys())))
def __setattr__( self, lowerCAmelCase__, lowerCAmelCase__) -> Dict:
snake_case_ = val
snake_case_ = val
snake_case_ = key.split('.')
snake_case_ = len(lowerCAmelCase__) - 1
snake_case_ = self._pointer
if len(lowerCAmelCase__) > 1:
for i, l in enumerate(lowerCAmelCase__):
if hasattr(self, lowerCAmelCase__) and isinstance(getattr(self, lowerCAmelCase__), lowerCAmelCase__):
setattr(getattr(self, lowerCAmelCase__), '.'.join(levels[i:]), lowerCAmelCase__)
if l == last_level:
snake_case_ = val
else:
snake_case_ = pointer[l]
def a_ ( self) -> List[Any]:
return self._pointer
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__) -> Optional[int]:
with open(f'{file_name}', 'w') as stream:
dump(lowerCAmelCase__, lowerCAmelCase__)
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__) -> List[Any]:
with open(f'{file_name}', 'w') as stream:
json.dump(lowerCAmelCase__, lowerCAmelCase__)
@staticmethod
def a_ ( lowerCAmelCase__) -> Optional[int]:
with open(lowerCAmelCase__) as stream:
snake_case_ = load(lowerCAmelCase__, Loader=lowerCAmelCase__)
return data
def __str__( self) -> Dict:
snake_case_ = ' '
if self._name != "root":
snake_case_ = f'{t * (self._level-1)}{self._name}:\n'
else:
snake_case_ = ''
snake_case_ = self._level
for i, (k, v) in enumerate(self._pointer.items()):
if isinstance(lowerCAmelCase__, lowerCAmelCase__):
r += f'{t * (self._level)}{v}\n'
self._level += 1
else:
r += f'{t * (self._level)}{k}: {v} ({type(lowerCAmelCase__).__name__})\n'
snake_case_ = level
return r[:-1]
@classmethod
def a_ ( cls, lowerCAmelCase__, **lowerCAmelCase__) -> Optional[Any]:
snake_case_ , snake_case_ = cls.get_config_dict(lowerCAmelCase__, **lowerCAmelCase__)
return cls(lowerCAmelCase__)
@classmethod
def a_ ( cls, lowerCAmelCase__, **lowerCAmelCase__) -> int:
snake_case_ = kwargs.pop('cache_dir', lowerCAmelCase__)
snake_case_ = kwargs.pop('force_download', lowerCAmelCase__)
snake_case_ = kwargs.pop('resume_download', lowerCAmelCase__)
snake_case_ = kwargs.pop('proxies', lowerCAmelCase__)
snake_case_ = kwargs.pop('local_files_only', lowerCAmelCase__)
if os.path.isdir(lowerCAmelCase__):
snake_case_ = os.path.join(lowerCAmelCase__, lowerCAmelCase__)
elif os.path.isfile(lowerCAmelCase__) or is_remote_url(lowerCAmelCase__):
snake_case_ = pretrained_model_name_or_path
else:
snake_case_ = hf_bucket_url(lowerCAmelCase__, filename=lowerCAmelCase__, use_cdn=lowerCAmelCase__)
try:
# Load from URL or cache if already cached
snake_case_ = cached_path(
lowerCAmelCase__, cache_dir=lowerCAmelCase__, force_download=lowerCAmelCase__, proxies=lowerCAmelCase__, resume_download=lowerCAmelCase__, local_files_only=lowerCAmelCase__, )
# Load config dict
if resolved_config_file is None:
raise EnvironmentError
snake_case_ = Config.load_yaml(lowerCAmelCase__)
except EnvironmentError:
snake_case_ = 'Can\'t load config for'
raise EnvironmentError(lowerCAmelCase__)
if resolved_config_file == config_file:
print('loading configuration file from path')
else:
print('loading configuration file cache')
return Config.load_yaml(lowerCAmelCase__), kwargs
def UpperCAmelCase ( UpperCAmelCase ) -> Optional[int]:
snake_case_ = torch.load('dump.pt' , map_location=in_tensor.device )
snake_case_ = in_tensor.numpy()
snake_case_ = out_tensor.numpy()[0]
print(na.shape , na[0, 0, :5] )
print(na.shape , na[0, 0, :5] )
assert np.allclose(UpperCAmelCase , UpperCAmelCase , rtol=0.01 , atol=0.1 ), (
f'{sum([1 for x in np.isclose(UpperCAmelCase , UpperCAmelCase , rtol=0.01 , atol=0.1 ).flatten() if x is False] )/len(na.flatten() )*100:.4f} %'
" element-wise mismatch"
)
raise Exception('tensors are all good' )
# Hugging face functions below
def UpperCAmelCase ( UpperCAmelCase ) -> str:
snake_case_ = urlparse(UpperCAmelCase )
return parsed.scheme in ("http", "https")
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=True ) -> str:
snake_case_ = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX
snake_case_ = '/' not in model_id
if legacy_format:
return f'{endpoint}/{model_id}-{filename}'
else:
return f'{endpoint}/{model_id}/{filename}'
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=None , UpperCAmelCase=0 , UpperCAmelCase=None , ) -> List[Any]:
snake_case_ = 'python/{}'.format(sys.version.split()[0] )
if _torch_available:
ua += "; torch/{}".format(torch.__version__ )
if isinstance(UpperCAmelCase , UpperCAmelCase ):
ua += "; " + "; ".join('{}/{}'.format(UpperCAmelCase , UpperCAmelCase ) for k, v in user_agent.items() )
elif isinstance(UpperCAmelCase , UpperCAmelCase ):
ua += "; " + user_agent
snake_case_ = {'user-agent': ua}
if resume_size > 0:
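        # A Range header asks the server to resume at byte offset resume_size.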
snake_case_ = 'bytes=%d-' % (resume_size,)
snake_case_ = requests.get(UpperCAmelCase , stream=UpperCAmelCase , proxies=UpperCAmelCase , headers=UpperCAmelCase )
if response.status_code == 416: # Range not satisfiable
return
snake_case_ = response.headers.get('Content-Length' )
snake_case_ = resume_size + int(UpperCAmelCase ) if content_length is not None else None
snake_case_ = tqdm(
unit='B' , unit_scale=UpperCAmelCase , total=UpperCAmelCase , initial=UpperCAmelCase , desc='Downloading' , )
for chunk in response.iter_content(chunk_size=1024 ):
if chunk: # filter out keep-alive new chunks
progress.update(len(UpperCAmelCase ) )
temp_file.write(UpperCAmelCase )
progress.close()
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase=None , UpperCAmelCase=False , UpperCAmelCase=None , UpperCAmelCase=10 , UpperCAmelCase=False , UpperCAmelCase=None , UpperCAmelCase=False , ) -> List[Any]:
if cache_dir is None:
snake_case_ = TRANSFORMERS_CACHE
if isinstance(UpperCAmelCase , UpperCAmelCase ):
snake_case_ = str(UpperCAmelCase )
os.makedirs(UpperCAmelCase , exist_ok=UpperCAmelCase )
snake_case_ = None
if not local_files_only:
try:
snake_case_ = requests.head(UpperCAmelCase , allow_redirects=UpperCAmelCase , proxies=UpperCAmelCase , timeout=UpperCAmelCase )
if response.status_code == 200:
snake_case_ = response.headers.get('ETag' )
except (EnvironmentError, requests.exceptions.Timeout):
# etag is already None
pass
snake_case_ = url_to_filename(UpperCAmelCase , UpperCAmelCase )
# get cache path to put the file
snake_case_ = os.path.join(UpperCAmelCase , UpperCAmelCase )
# etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible.
# try to get the last downloaded one
if etag is None:
if os.path.exists(UpperCAmelCase ):
return cache_path
else:
snake_case_ = [
file
for file in fnmatch.filter(os.listdir(UpperCAmelCase ) , filename + '.*' )
if not file.endswith('.json' ) and not file.endswith('.lock' )
]
if len(UpperCAmelCase ) > 0:
return os.path.join(UpperCAmelCase , matching_files[-1] )
else:
# If files cannot be found and local_files_only=True,
# the models might've been found if local_files_only=False
# Notify the user about that
if local_files_only:
raise ValueError(
'Cannot find the requested files in the cached path and outgoing traffic has been'
' disabled. To enable model look-ups and downloads online, set \'local_files_only\''
' to False.' )
return None
# From now on, etag is not None.
if os.path.exists(UpperCAmelCase ) and not force_download:
return cache_path
# Prevent parallel downloads of the same file with a lock.
snake_case_ = cache_path + '.lock'
with FileLock(UpperCAmelCase ):
# If the download just completed while the lock was activated.
if os.path.exists(UpperCAmelCase ) and not force_download:
# Even if returning early like here, the lock will be released.
return cache_path
if resume_download:
snake_case_ = cache_path + '.incomplete'
@contextmanager
def _resumable_file_manager():
with open(UpperCAmelCase , 'a+b' ) as f:
yield f
snake_case_ = _resumable_file_manager
if os.path.exists(UpperCAmelCase ):
snake_case_ = os.stat(UpperCAmelCase ).st_size
else:
snake_case_ = 0
else:
snake_case_ = partial(tempfile.NamedTemporaryFile , dir=UpperCAmelCase , delete=UpperCAmelCase )
snake_case_ = 0
# Download to temporary file, then copy to cache dir once finished.
# Otherwise you get corrupt cache entries if the download gets interrupted.
with temp_file_manager() as temp_file:
print(
'%s not found in cache or force_download set to True, downloading to %s' , UpperCAmelCase , temp_file.name , )
http_get(
UpperCAmelCase , UpperCAmelCase , proxies=UpperCAmelCase , resume_size=UpperCAmelCase , user_agent=UpperCAmelCase , )
os.replace(temp_file.name , UpperCAmelCase )
snake_case_ = {'url': url, 'etag': etag}
snake_case_ = cache_path + '.json'
with open(UpperCAmelCase , 'w' ) as meta_file:
json.dump(UpperCAmelCase , UpperCAmelCase )
return cache_path
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase=None ) -> str:
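    # The cache filename is sha256(url); when an etag is known, a sha256(etag)
    # suffix maps a changed remote file to a fresh cache entry.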
snake_case_ = url.encode('utf-8' )
snake_case_ = shaaaa(UpperCAmelCase )
snake_case_ = url_hash.hexdigest()
if etag:
snake_case_ = etag.encode('utf-8' )
snake_case_ = shaaaa(UpperCAmelCase )
filename += "." + etag_hash.hexdigest()
if url.endswith('.h5' ):
filename += ".h5"
return filename
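# A minimal, self-contained sketch of the naming scheme above: hash the URL and,
# when present, the ETag, so the cache entry changes whenever the server reports
# new content. The helper name `_sketch_cache_filename` is hypothetical.
def _sketch_cache_filename(url, etag=None):
    from hashlib import sha256
    filename = sha256(url.encode('utf-8')).hexdigest()
    if etag:
        filename += '.' + sha256(etag.encode('utf-8')).hexdigest()
    if url.endswith('.h5'):
        filename += '.h5'  # keep the extension that h5py checks for
    return filename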
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase=None , UpperCAmelCase=False , UpperCAmelCase=None , UpperCAmelCase=False , UpperCAmelCase=None , UpperCAmelCase=False , UpperCAmelCase=False , UpperCAmelCase=False , ) -> Tuple:
if cache_dir is None:
snake_case_ = TRANSFORMERS_CACHE
if isinstance(UpperCAmelCase , UpperCAmelCase ):
snake_case_ = str(UpperCAmelCase )
if isinstance(UpperCAmelCase , UpperCAmelCase ):
snake_case_ = str(UpperCAmelCase )
if is_remote_url(UpperCAmelCase ):
# URL, so get it from the cache (downloading if necessary)
snake_case_ = get_from_cache(
UpperCAmelCase , cache_dir=UpperCAmelCase , force_download=UpperCAmelCase , proxies=UpperCAmelCase , resume_download=UpperCAmelCase , user_agent=UpperCAmelCase , local_files_only=UpperCAmelCase , )
elif os.path.exists(UpperCAmelCase ):
# File, and it exists.
snake_case_ = url_or_filename
elif urlparse(UpperCAmelCase ).scheme == "":
# File, but it doesn't exist.
raise EnvironmentError('file {} not found'.format(UpperCAmelCase ) )
else:
# Something unknown
raise ValueError('unable to parse {} as a URL or as a local path'.format(UpperCAmelCase ) )
if extract_compressed_file:
if not is_zipfile(UpperCAmelCase ) and not tarfile.is_tarfile(UpperCAmelCase ):
return output_path
# Path where we extract compressed archives
# We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/"
snake_case_ , snake_case_ = os.path.split(UpperCAmelCase )
snake_case_ = output_file.replace('.' , '-' ) + '-extracted'
snake_case_ = os.path.join(UpperCAmelCase , UpperCAmelCase )
if os.path.isdir(UpperCAmelCase ) and os.listdir(UpperCAmelCase ) and not force_extract:
return output_path_extracted
# Prevent parallel extractions
snake_case_ = output_path + '.lock'
with FileLock(UpperCAmelCase ):
shutil.rmtree(UpperCAmelCase , ignore_errors=UpperCAmelCase )
os.makedirs(UpperCAmelCase )
if is_zipfile(UpperCAmelCase ):
with ZipFile(UpperCAmelCase , 'r' ) as zip_file:
zip_file.extractall(UpperCAmelCase )
zip_file.close()
elif tarfile.is_tarfile(UpperCAmelCase ):
snake_case_ = tarfile.open(UpperCAmelCase )
tar_file.extractall(UpperCAmelCase )
tar_file.close()
else:
raise EnvironmentError('Archive format of {} could not be identified'.format(UpperCAmelCase ) )
return output_path_extracted
return output_path
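# The archive branch above reduces to "take a file lock, then extract only if
# the target dir is still empty". A minimal sketch, assuming `filelock` is
# installed (it is used above); the helper name is hypothetical.
def _sketch_extract_once(archive_path, extract_dir):
    import os
    from zipfile import ZipFile
    from filelock import FileLock
    os.makedirs(extract_dir, exist_ok=True)
    with FileLock(archive_path + '.lock'):
        if not os.listdir(extract_dir):  # a concurrent process may have finished first
            with ZipFile(archive_path, 'r') as zf:
                zf.extractall(extract_dir)
    return extract_dir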
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase="," ) -> Dict:
assert isinstance(UpperCAmelCase , UpperCAmelCase )
if os.path.isfile(UpperCAmelCase ):
with open(UpperCAmelCase ) as f:
snake_case_ = eval(f.read() )
else:
snake_case_ = requests.get(UpperCAmelCase )
try:
            snake_case_ = req.json()
except Exception:
snake_case_ = req.content.decode()
assert data is not None, "could not connect"
try:
snake_case_ = eval(UpperCAmelCase )
except Exception:
snake_case_ = data.split('\n' )
req.close()
return data
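# `eval` on a remote payload (above) is unsafe for untrusted input. A hedged,
# safer sketch of the same parsing cascade, preferring json.loads and falling
# back to ast.literal_eval; the helper name is hypothetical.
def _sketch_parse_payload(text, delim='\n'):
    import ast
    import json
    try:
        return json.loads(text)
    except ValueError:
        try:
            return ast.literal_eval(text)
        except (ValueError, SyntaxError):
            return text.split(delim)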
def UpperCAmelCase ( UpperCAmelCase ) -> str:
snake_case_ = requests.get(UpperCAmelCase )
snake_case_ = np.array(Image.open(BytesIO(response.content ) ) )
return img
def UpperCAmelCase ( UpperCAmelCase ) -> Optional[Any]:
snake_case_ = url.split('/' )[-1]
if fn not in os.listdir(os.getcwd() ):
wget.download(UpperCAmelCase )
with open(UpperCAmelCase , 'rb' ) as stream:
snake_case_ = pkl.load(UpperCAmelCase )
snake_case_ = weights.pop('model' )
snake_case_ = {}
for k, v in model.items():
snake_case_ = torch.from_numpy(UpperCAmelCase )
if "running_var" in k:
snake_case_ = torch.tensor([0] )
snake_case_ = k.replace('running_var' , 'num_batches_tracked' )
snake_case_ = zero
return new
def UpperCAmelCase ( ) -> List[str]:
    print(f'{os.path.abspath(os.path.join(__file__ , os.pardir ) )}/demo.ipynb' )
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase="RGB" ) -> List[Any]:
assert isinstance(UpperCAmelCase , UpperCAmelCase )
if os.path.isfile(UpperCAmelCase ):
snake_case_ = cva.imread(UpperCAmelCase )
else:
snake_case_ = get_image_from_url(UpperCAmelCase )
assert img is not None, f'could not connect to: {im}'
snake_case_ = cva.cvtColor(UpperCAmelCase , cva.COLOR_BGR2RGB )
if input_format == "RGB":
snake_case_ = img[:, :, ::-1]
return img
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase=1 ) -> List[Any]:
return (images[i : i + batch] for i in range(0 , len(UpperCAmelCase ) , UpperCAmelCase ))
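# The generator above yields fixed-size slices. A standalone equivalent with a
# quick sanity check; the helper name is hypothetical.
def _sketch_chunks(items, batch=1):
    return [items[i : i + batch] for i in range(0, len(items), batch)]

assert _sketch_chunks([0, 1, 2, 3, 4], batch=2) == [[0, 1], [2, 3], [4]]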
| 367
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__UpperCamelCase = logging.get_logger(__name__)
__UpperCamelCase = {
'''microsoft/resnet-50''': '''https://huggingface.co/microsoft/resnet-50/blob/main/config.json''',
}
class UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE_ = "resnet"
SCREAMING_SNAKE_CASE_ = ["basic", "bottleneck"]
def __init__( self, lowerCAmelCase__=3, lowerCAmelCase__=64, lowerCAmelCase__=[256, 512, 1024, 2048], lowerCAmelCase__=[3, 4, 6, 3], lowerCAmelCase__="bottleneck", lowerCAmelCase__="relu", lowerCAmelCase__=False, lowerCAmelCase__=None, lowerCAmelCase__=None, **lowerCAmelCase__, ) -> Dict:
super().__init__(**lowerCAmelCase__)
if layer_type not in self.layer_types:
raise ValueError(f'layer_type={layer_type} is not one of {",".join(self.layer_types)}')
snake_case_ = num_channels
snake_case_ = embedding_size
snake_case_ = hidden_sizes
snake_case_ = depths
snake_case_ = layer_type
snake_case_ = hidden_act
snake_case_ = downsample_in_first_stage
snake_case_ = ['stem'] + [f'stage{idx}' for idx in range(1, len(lowerCAmelCase__) + 1)]
snake_case_ , snake_case_ = get_aligned_output_features_output_indices(
out_features=lowerCAmelCase__, out_indices=lowerCAmelCase__, stage_names=self.stage_names)
class UpperCamelCase ( lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE_ = version.parse("1.11" )
@property
def a_ ( self) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
])
@property
def a_ ( self) -> float:
return 1e-3
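# `stage_names` in the config above is always 'stem' followed by one entry per
# stage. A standalone sketch of that derivation; the helper name is hypothetical.
def _sketch_stage_names(depths):
    return ['stem'] + [f'stage{idx}' for idx in range(1, len(depths) + 1)]

assert _sketch_stage_names([3, 4, 6, 3]) == ['stem', 'stage1', 'stage2', 'stage3', 'stage4']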
| 312
| 0
|
"""simple docstring"""
import os
from typing import Dict, List, Tuple, TypeVar, Union
__UpperCamelCase = TypeVar('''T''')
__UpperCamelCase = Union[List[T], Tuple[T, ...]]
__UpperCamelCase = Union[T, List[T], Dict[str, T]]
__UpperCamelCase = Union[str, bytes, os.PathLike]
| 368
|
"""simple docstring"""
import unittest
from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
__UpperCamelCase = get_tests_dir('''fixtures/spiece.model''')
@require_sentencepiece
@require_tokenizers
class UpperCamelCase ( lowerCAmelCase__ , unittest.TestCase ):
SCREAMING_SNAKE_CASE_ = DebertaVaTokenizer
SCREAMING_SNAKE_CASE_ = DebertaVaTokenizerFast
SCREAMING_SNAKE_CASE_ = True
SCREAMING_SNAKE_CASE_ = True
def a_ ( self) -> int:
super().setUp()
# We have a SentencePiece fixture for testing
snake_case_ = DebertaVaTokenizer(lowerCAmelCase__, unk_token='<unk>')
tokenizer.save_pretrained(self.tmpdirname)
def a_ ( self, lowerCAmelCase__) -> Any:
snake_case_ = 'this is a test'
snake_case_ = 'this is a test'
return input_text, output_text
def a_ ( self) -> Optional[int]:
snake_case_ = '<pad>'
snake_case_ = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCAmelCase__), lowerCAmelCase__)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCAmelCase__), lowerCAmelCase__)
def a_ ( self) -> Tuple:
snake_case_ = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0], '<pad>')
self.assertEqual(vocab_keys[1], '<unk>')
self.assertEqual(vocab_keys[-1], '[PAD]')
self.assertEqual(len(lowerCAmelCase__), 3_0001)
def a_ ( self) -> Dict:
self.assertEqual(self.get_tokenizer().vocab_size, 3_0000)
def a_ ( self) -> List[str]:
# fmt: off
snake_case_ = ' \tHeLLo!how \n Are yoU? '
snake_case_ = ['▁hello', '!', 'how', '▁are', '▁you', '?']
# fmt: on
snake_case_ = DebertaVaTokenizer(lowerCAmelCase__, do_lower_case=lowerCAmelCase__)
snake_case_ = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__))
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = DebertaVaTokenizerFast(lowerCAmelCase__, do_lower_case=lowerCAmelCase__)
snake_case_ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__))
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
@unittest.skip('There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.')
def a_ ( self) -> str:
pass
@unittest.skip('There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.')
def a_ ( self) -> List[Any]:
pass
def a_ ( self) -> str:
# fmt: off
snake_case_ = 'I was born in 92000, and this is falsé.'
snake_case_ = ['▁', '<unk>', '▁was', '▁born', '▁in', '▁9', '2000', '▁', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '▁', '.', ]
# fmt: on
snake_case_ = DebertaVaTokenizer(lowerCAmelCase__, split_by_punct=lowerCAmelCase__)
snake_case_ = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__))
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = DebertaVaTokenizerFast(lowerCAmelCase__, split_by_punct=lowerCAmelCase__)
snake_case_ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__))
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
def a_ ( self) -> List[Any]:
# fmt: off
snake_case_ = 'I was born in 92000, and this is falsé.'
snake_case_ = ['▁i', '▁was', '▁born', '▁in', '▁9', '2000', '▁', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '▁', '.', ]
# fmt: on
snake_case_ = DebertaVaTokenizer(lowerCAmelCase__, do_lower_case=lowerCAmelCase__, split_by_punct=lowerCAmelCase__)
snake_case_ = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__))
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = DebertaVaTokenizerFast(lowerCAmelCase__, do_lower_case=lowerCAmelCase__, split_by_punct=lowerCAmelCase__)
snake_case_ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__))
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
def a_ ( self) -> Dict:
# fmt: off
snake_case_ = 'I was born in 92000, and this is falsé.'
snake_case_ = ['▁i', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '.', ]
# fmt: on
snake_case_ = DebertaVaTokenizer(lowerCAmelCase__, do_lower_case=lowerCAmelCase__, split_by_punct=lowerCAmelCase__)
snake_case_ = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__))
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = DebertaVaTokenizerFast(lowerCAmelCase__, do_lower_case=lowerCAmelCase__, split_by_punct=lowerCAmelCase__)
snake_case_ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__))
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
def a_ ( self) -> Tuple:
# fmt: off
snake_case_ = 'I was born in 92000, and this is falsé.'
snake_case_ = ['▁', '<unk>', '▁was', '▁born', '▁in', '▁9', '2000', '▁', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '▁', '.', ]
# fmt: on
snake_case_ = DebertaVaTokenizer(lowerCAmelCase__, do_lower_case=lowerCAmelCase__, split_by_punct=lowerCAmelCase__)
snake_case_ = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__))
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = DebertaVaTokenizerFast(lowerCAmelCase__, do_lower_case=lowerCAmelCase__, split_by_punct=lowerCAmelCase__)
snake_case_ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__))
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
def a_ ( self) -> Any:
# fmt: off
snake_case_ = ' \tHeLLo!how \n Are yoU? '
snake_case_ = ['▁', '<unk>', 'e', '<unk>', 'o', '!', 'how', '▁', '<unk>', 're', '▁yo', '<unk>', '?']
# fmt: on
snake_case_ = DebertaVaTokenizer(lowerCAmelCase__, do_lower_case=lowerCAmelCase__, split_by_punct=lowerCAmelCase__)
snake_case_ = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__))
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = DebertaVaTokenizerFast(lowerCAmelCase__, do_lower_case=lowerCAmelCase__, split_by_punct=lowerCAmelCase__)
snake_case_ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__))
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
def a_ ( self) -> Dict:
snake_case_ = self.get_tokenizer()
snake_case_ = self.get_rust_tokenizer()
snake_case_ = 'I was born in 92000, and this is falsé.'
snake_case_ = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__))
snake_case_ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__))
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__)
snake_case_ = rust_tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = self.get_rust_tokenizer()
snake_case_ = tokenizer.encode(lowerCAmelCase__)
snake_case_ = rust_tokenizer.encode(lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
def a_ ( self) -> int:
snake_case_ = 'This is a test'
snake_case_ = [13, 1, 4398, 25, 21, 1289]
snake_case_ = ['▁', 'T', 'his', '▁is', '▁a', '▁test']
snake_case_ = ['▁', '<unk>', 'his', '▁is', '▁a', '▁test']
snake_case_ = DebertaVaTokenizer(lowerCAmelCase__, keep_accents=lowerCAmelCase__)
snake_case_ = DebertaVaTokenizerFast(lowerCAmelCase__, keep_accents=lowerCAmelCase__)
snake_case_ = tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = tokenizer.tokenize(lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = tokenizer.convert_ids_to_tokens(lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = rust_tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = rust_tokenizer.tokenize(lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = rust_tokenizer.convert_ids_to_tokens(lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
# fmt: off
snake_case_ = 'I was born in 92000, and this is falsé.'
snake_case_ = [13, 1, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9]
snake_case_ = ['▁', 'I', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', 'é', '.', ]
snake_case_ = ['▁', '<unk>', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '.', ]
# fmt: on
snake_case_ = tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = tokenizer.tokenize(lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = tokenizer.convert_ids_to_tokens(lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = rust_tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = rust_tokenizer.tokenize(lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = rust_tokenizer.convert_ids_to_tokens(lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
def a_ ( self) -> Tuple:
snake_case_ = DebertaVaTokenizer(lowerCAmelCase__)
snake_case_ = tokenizer.encode('sequence builders')
snake_case_ = tokenizer.encode('multi-sequence build')
snake_case_ = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase__)
snake_case_ = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase__, lowerCAmelCase__)
self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id], lowerCAmelCase__)
self.assertEqual(
[tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id], lowerCAmelCase__, )
@slow
def a_ ( self) -> Union[str, Any]:
# fmt: off
snake_case_ = {'input_ids': [[1, 3_9867, 36, 1_9390, 486, 27, 3_5052, 8_1436, 18, 6_0685, 1225, 7, 3_5052, 8_1436, 18, 9367, 1_6899, 18, 1_5937, 53, 594, 773, 18, 1_6287, 3_0465, 36, 1_5937, 6, 4_1139, 38, 3_6979, 6_0763, 191, 6, 3_4132, 99, 6, 5_0538, 390, 4_3230, 6, 3_4132, 2779, 2_0850, 14, 699, 1072, 1194, 36, 382, 1_0901, 53, 7, 699, 1072, 2084, 36, 2_0422, 630, 53, 19, 105, 3049, 1896, 1053, 1_6899, 1506, 11, 3_7978, 4243, 7, 1237, 3_1869, 200, 1_6566, 654, 6, 3_5052, 8_1436, 7, 5_5630, 1_3593, 4, 2], [1, 26, 1_5011, 13, 667, 8, 1053, 18, 2_3611, 1237, 7_2356, 1_2820, 34, 10_4134, 1209, 35, 1_3313, 6627, 21, 202, 347, 7, 164, 2399, 11, 46, 4485, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 1232, 2864, 1_5785, 1_4951, 105, 5, 8581, 1250, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowerCAmelCase__, model_name='microsoft/deberta-v2-xlarge', revision='ad6e42c1532ddf3a15c39246b63f5559d558b670', )
| 312
| 0
|
"""simple docstring"""
__UpperCamelCase = [
'''Audio''',
'''Array2D''',
'''Array3D''',
'''Array4D''',
'''Array5D''',
'''ClassLabel''',
'''Features''',
'''Sequence''',
'''Value''',
'''Image''',
'''Translation''',
'''TranslationVariableLanguages''',
]
from .audio import Audio
from .features import Array2D, Array3D, Array4D, Array5D, ClassLabel, Features, Sequence, Value
from .image import Image
from .translation import Translation, TranslationVariableLanguages
| 369
|
"""simple docstring"""
# flake8: noqa
# Lint as: python3
from typing import Dict, List, Optional, Type
from .. import config
from ..utils import logging
from .formatting import (
ArrowFormatter,
CustomFormatter,
Formatter,
PandasFormatter,
PythonFormatter,
TensorFormatter,
format_table,
query_table,
)
from .np_formatter import NumpyFormatter
__UpperCamelCase = logging.get_logger(__name__)
__UpperCamelCase = {}
__UpperCamelCase = {}
__UpperCamelCase = {}
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = None , ) -> Optional[Any]:
snake_case_ = aliases if aliases is not None else []
if format_type in _FORMAT_TYPES:
logger.warning(
f'Overwriting format type \'{format_type}\' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})' )
snake_case_ = formatter_cls
for alias in set(aliases + [format_type] ):
if alias in _FORMAT_TYPES_ALIASES:
logger.warning(
f'Overwriting format type alias \'{alias}\' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})' )
snake_case_ = format_type
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = None ) -> Union[str, Any]:
snake_case_ = aliases if aliases is not None else []
for alias in set(aliases + [format_type] ):
snake_case_ = unavailable_error
# Here we define all the available formatting functions that can be used by `Dataset.set_format`
_register_formatter(PythonFormatter, None, aliases=['''python'''])
_register_formatter(ArrowFormatter, '''arrow''', aliases=['''pa''', '''pyarrow'''])
_register_formatter(NumpyFormatter, '''numpy''', aliases=['''np'''])
_register_formatter(PandasFormatter, '''pandas''', aliases=['''pd'''])
_register_formatter(CustomFormatter, '''custom''')
if config.TORCH_AVAILABLE:
from .torch_formatter import TorchFormatter
_register_formatter(TorchFormatter, '''torch''', aliases=['''pt''', '''pytorch'''])
else:
__UpperCamelCase = ValueError('''PyTorch needs to be installed to be able to return PyTorch tensors.''')
_register_unavailable_formatter(_torch_error, '''torch''', aliases=['''pt''', '''pytorch'''])
if config.TF_AVAILABLE:
from .tf_formatter import TFFormatter
_register_formatter(TFFormatter, '''tensorflow''', aliases=['''tf'''])
else:
__UpperCamelCase = ValueError('''Tensorflow needs to be installed to be able to return Tensorflow tensors.''')
_register_unavailable_formatter(_tf_error, '''tensorflow''', aliases=['''tf'''])
if config.JAX_AVAILABLE:
from .jax_formatter import JaxFormatter
_register_formatter(JaxFormatter, '''jax''', aliases=[])
else:
__UpperCamelCase = ValueError('''JAX needs to be installed to be able to return JAX arrays.''')
_register_unavailable_formatter(_jax_error, '''jax''', aliases=[])
def UpperCAmelCase ( UpperCAmelCase ) -> Optional[str]:
if format_type in _FORMAT_TYPES_ALIASES:
return _FORMAT_TYPES_ALIASES[format_type]
else:
return format_type
def UpperCAmelCase ( UpperCAmelCase , **UpperCAmelCase ) -> Formatter:
snake_case_ = get_format_type_from_alias(UpperCAmelCase )
if format_type in _FORMAT_TYPES:
return _FORMAT_TYPES[format_type](**UpperCAmelCase )
if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE:
raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type]
else:
raise ValueError(
            f'Return type should be None or selected in {list(type for type in _FORMAT_TYPES.keys() if type is not None )}, but got \'{format_type}\'' )
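# `get_formatter` resolves an alias to its canonical type, then looks the
# formatter up in the registry. The same two-step lookup on plain dicts; all
# names below are hypothetical.
def _sketch_resolve(fmt, registry, aliases):
    fmt = aliases.get(fmt, fmt)
    if fmt not in registry:
        raise ValueError(f'unknown format type {fmt!r}')
    return registry[fmt]

assert _sketch_resolve('np', {'numpy': 'NumpyFormatter'}, {'np': 'numpy'}) == 'NumpyFormatter'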
| 312
| 0
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__UpperCamelCase = logging.get_logger(__name__)
__UpperCamelCase = {
'''google/vit-base-patch16-224''': '''https://huggingface.co/vit-base-patch16-224/resolve/main/config.json''',
# See all ViT models at https://huggingface.co/models?filter=vit
}
class UpperCamelCase ( lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE_ = "vit"
def __init__( self, lowerCAmelCase__=768, lowerCAmelCase__=12, lowerCAmelCase__=12, lowerCAmelCase__=3072, lowerCAmelCase__="gelu", lowerCAmelCase__=0.0, lowerCAmelCase__=0.0, lowerCAmelCase__=0.02, lowerCAmelCase__=1e-12, lowerCAmelCase__=224, lowerCAmelCase__=16, lowerCAmelCase__=3, lowerCAmelCase__=True, lowerCAmelCase__=16, **lowerCAmelCase__, ) -> List[str]:
super().__init__(**lowerCAmelCase__)
snake_case_ = hidden_size
snake_case_ = num_hidden_layers
snake_case_ = num_attention_heads
snake_case_ = intermediate_size
snake_case_ = hidden_act
snake_case_ = hidden_dropout_prob
snake_case_ = attention_probs_dropout_prob
snake_case_ = initializer_range
snake_case_ = layer_norm_eps
snake_case_ = image_size
snake_case_ = patch_size
snake_case_ = num_channels
snake_case_ = qkv_bias
snake_case_ = encoder_stride
class UpperCamelCase ( lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE_ = version.parse("1.11" )
@property
def a_ ( self) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
])
@property
def a_ ( self) -> float:
return 1e-4
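# With the defaults above (image_size=224, patch_size=16) a ViT sees a 14x14
# grid, i.e. 196 patches. A quick sketch of that arithmetic; the helper name is
# hypothetical.
def _sketch_num_patches(image_size=224, patch_size=16):
    return (image_size // patch_size) ** 2

assert _sketch_num_patches() == 196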
| 370
|
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mvp import MvpTokenizer
__UpperCamelCase = logging.get_logger(__name__)
__UpperCamelCase = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
# See all MVP models at https://huggingface.co/models?filter=mvp
__UpperCamelCase = {
'''vocab_file''': {
'''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/vocab.json''',
},
'''added_tokens.json''': {
'''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/added_tokens.json''',
},
'''merges_file''': {
'''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/tokenizer.json''',
},
}
__UpperCamelCase = {
'''RUCAIBox/mvp''': 1024,
}
class UpperCamelCase ( lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE_ = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE_ = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE_ = ["input_ids", "attention_mask"]
SCREAMING_SNAKE_CASE_ = MvpTokenizer
def __init__( self, lowerCAmelCase__=None, lowerCAmelCase__=None, lowerCAmelCase__=None, lowerCAmelCase__="replace", lowerCAmelCase__="<s>", lowerCAmelCase__="</s>", lowerCAmelCase__="</s>", lowerCAmelCase__="<s>", lowerCAmelCase__="<unk>", lowerCAmelCase__="<pad>", lowerCAmelCase__="<mask>", lowerCAmelCase__=False, lowerCAmelCase__=True, **lowerCAmelCase__, ) -> Union[str, Any]:
super().__init__(
lowerCAmelCase__, lowerCAmelCase__, tokenizer_file=lowerCAmelCase__, errors=lowerCAmelCase__, bos_token=lowerCAmelCase__, eos_token=lowerCAmelCase__, sep_token=lowerCAmelCase__, cls_token=lowerCAmelCase__, unk_token=lowerCAmelCase__, pad_token=lowerCAmelCase__, mask_token=lowerCAmelCase__, add_prefix_space=lowerCAmelCase__, trim_offsets=lowerCAmelCase__, **lowerCAmelCase__, )
snake_case_ = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
if pre_tok_state.get('add_prefix_space', lowerCAmelCase__) != add_prefix_space:
snake_case_ = getattr(lowerCAmelCase__, pre_tok_state.pop('type'))
snake_case_ = add_prefix_space
snake_case_ = pre_tok_class(**lowerCAmelCase__)
snake_case_ = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
snake_case_ = 'post_processor'
snake_case_ = getattr(self.backend_tokenizer, lowerCAmelCase__, lowerCAmelCase__)
if tokenizer_component_instance:
snake_case_ = json.loads(tokenizer_component_instance.__getstate__())
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
snake_case_ = tuple(state['sep'])
if "cls" in state:
snake_case_ = tuple(state['cls'])
snake_case_ = False
if state.get('add_prefix_space', lowerCAmelCase__) != add_prefix_space:
snake_case_ = add_prefix_space
snake_case_ = True
if state.get('trim_offsets', lowerCAmelCase__) != trim_offsets:
snake_case_ = trim_offsets
snake_case_ = True
if changes_to_apply:
snake_case_ = getattr(lowerCAmelCase__, state.pop('type'))
snake_case_ = component_class(**lowerCAmelCase__)
setattr(self.backend_tokenizer, lowerCAmelCase__, lowerCAmelCase__)
@property
def a_ ( self) -> str:
if self._mask_token is None:
if self.verbose:
logger.error('Using mask_token, but it is not set yet.')
return None
return str(self._mask_token)
@mask_token.setter
def a_ ( self, lowerCAmelCase__) -> Any:
snake_case_ = AddedToken(lowerCAmelCase__, lstrip=lowerCAmelCase__, rstrip=lowerCAmelCase__) if isinstance(lowerCAmelCase__, lowerCAmelCase__) else value
snake_case_ = value
def a_ ( self, *lowerCAmelCase__, **lowerCAmelCase__) -> BatchEncoding:
snake_case_ = kwargs.get('is_split_into_words', lowerCAmelCase__)
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
'to use it with pretokenized inputs.')
return super()._batch_encode_plus(*lowerCAmelCase__, **lowerCAmelCase__)
def a_ ( self, *lowerCAmelCase__, **lowerCAmelCase__) -> BatchEncoding:
snake_case_ = kwargs.get('is_split_into_words', lowerCAmelCase__)
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
'to use it with pretokenized inputs.')
return super()._encode_plus(*lowerCAmelCase__, **lowerCAmelCase__)
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None) -> Tuple[str]:
snake_case_ = self._tokenizer.model.save(lowerCAmelCase__, name=lowerCAmelCase__)
return tuple(lowerCAmelCase__)
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__=None) -> str:
snake_case_ = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None) -> List[int]:
snake_case_ = [self.sep_token_id]
snake_case_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
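# The two methods above produce BART-style sequences: <s> A </s> for a single
# input and <s> A </s></s> B </s> for a pair, with all-zero token type ids.
# A standalone sketch with hypothetical ids (bos=0, eos=2).
def _sketch_pair_layout(ids_a, ids_b, bos=0, eos=2):
    return [bos] + ids_a + [eos] + [eos] + ids_b + [eos]

assert _sketch_pair_layout([5], [7]) == [0, 5, 2, 2, 7, 2]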
| 312
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__UpperCamelCase = {
'''configuration_mask2former''': [
'''MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''Mask2FormerConfig''',
],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase = ['''Mask2FormerImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase = [
'''MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Mask2FormerForUniversalSegmentation''',
'''Mask2FormerModel''',
'''Mask2FormerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_maskaformer import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskaFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_maskaformer import MaskaFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskaformer import (
MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskaFormerForUniversalSegmentation,
MaskaFormerModel,
MaskaFormerPreTrainedModel,
)
else:
import sys
__UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 371
|
"""simple docstring"""
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionImgaImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class UpperCamelCase ( lowerCAmelCase__ , unittest.TestCase ):
SCREAMING_SNAKE_CASE_ = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"
def a_ ( self, lowerCAmelCase__=0) -> List[Any]:
snake_case_ = floats_tensor((1, 3, 128, 128), rng=random.Random(lowerCAmelCase__))
snake_case_ = np.random.RandomState(lowerCAmelCase__)
snake_case_ = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'generator': generator,
'num_inference_steps': 3,
'strength': 0.75,
'guidance_scale': 7.5,
'output_type': 'numpy',
}
return inputs
def a_ ( self) -> Optional[Any]:
snake_case_ = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
pipe.set_progress_bar_config(disable=lowerCAmelCase__)
snake_case_ = self.get_dummy_inputs()
snake_case_ = pipe(**lowerCAmelCase__).images
snake_case_ = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 128, 128, 3)
snake_case_ = np.array([0.69643, 0.58484, 0.50314, 0.58760, 0.55368, 0.59643, 0.51529, 0.41217, 0.49087])
assert np.abs(image_slice - expected_slice).max() < 1e-1
def a_ ( self) -> List[str]:
snake_case_ = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
snake_case_ = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=lowerCAmelCase__)
pipe.set_progress_bar_config(disable=lowerCAmelCase__)
snake_case_ = self.get_dummy_inputs()
snake_case_ = pipe(**lowerCAmelCase__).images
snake_case_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
snake_case_ = np.array([0.61737, 0.54642, 0.53183, 0.54465, 0.52742, 0.60525, 0.49969, 0.40655, 0.48154])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
def a_ ( self) -> str:
snake_case_ = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
snake_case_ = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
pipe.set_progress_bar_config(disable=lowerCAmelCase__)
# warmup pass to apply optimizations
snake_case_ = pipe(**self.get_dummy_inputs())
snake_case_ = self.get_dummy_inputs()
snake_case_ = pipe(**lowerCAmelCase__).images
snake_case_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
snake_case_ = np.array([0.52761, 0.59977, 0.49033, 0.49619, 0.54282, 0.50311, 0.47600, 0.40918, 0.45203])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
def a_ ( self) -> int:
snake_case_ = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
snake_case_ = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
pipe.set_progress_bar_config(disable=lowerCAmelCase__)
snake_case_ = self.get_dummy_inputs()
snake_case_ = pipe(**lowerCAmelCase__).images
snake_case_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
snake_case_ = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
def a_ ( self) -> Dict:
snake_case_ = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
snake_case_ = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
pipe.set_progress_bar_config(disable=lowerCAmelCase__)
snake_case_ = self.get_dummy_inputs()
snake_case_ = pipe(**lowerCAmelCase__).images
snake_case_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
snake_case_ = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
def a_ ( self) -> Dict:
snake_case_ = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
snake_case_ = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe.set_progress_bar_config(disable=lowerCAmelCase__)
snake_case_ = self.get_dummy_inputs()
snake_case_ = pipe(**lowerCAmelCase__).images
snake_case_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
snake_case_ = np.array([0.65331, 0.58277, 0.48204, 0.56059, 0.53665, 0.56235, 0.50969, 0.40009, 0.46552])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class UpperCamelCase ( unittest.TestCase ):
@property
def a_ ( self) -> int:
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def a_ ( self) -> str:
snake_case_ = ort.SessionOptions()
snake_case_ = False
return options
def a_ ( self) -> Any:
snake_case_ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/img2img/sketch-mountains-input.jpg')
snake_case_ = init_image.resize((768, 512))
# using the PNDM scheduler by default
snake_case_ = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4', revision='onnx', safety_checker=lowerCAmelCase__, feature_extractor=lowerCAmelCase__, provider=self.gpu_provider, sess_options=self.gpu_options, )
pipe.set_progress_bar_config(disable=lowerCAmelCase__)
snake_case_ = 'A fantasy landscape, trending on artstation'
snake_case_ = np.random.RandomState(0)
snake_case_ = pipe(
prompt=lowerCAmelCase__, image=lowerCAmelCase__, strength=0.75, guidance_scale=7.5, num_inference_steps=10, generator=lowerCAmelCase__, output_type='np', )
snake_case_ = output.images
snake_case_ = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 768, 3)
snake_case_ = np.array([0.4909, 0.5059, 0.5372, 0.4623, 0.4876, 0.5049, 0.4820, 0.4956, 0.5019])
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
def a_ ( self) -> List[Any]:
snake_case_ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/img2img/sketch-mountains-input.jpg')
snake_case_ = init_image.resize((768, 512))
snake_case_ = LMSDiscreteScheduler.from_pretrained(
'runwayml/stable-diffusion-v1-5', subfolder='scheduler', revision='onnx')
snake_case_ = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5', revision='onnx', scheduler=lowerCAmelCase__, safety_checker=lowerCAmelCase__, feature_extractor=lowerCAmelCase__, provider=self.gpu_provider, sess_options=self.gpu_options, )
pipe.set_progress_bar_config(disable=lowerCAmelCase__)
snake_case_ = 'A fantasy landscape, trending on artstation'
snake_case_ = np.random.RandomState(0)
snake_case_ = pipe(
prompt=lowerCAmelCase__, image=lowerCAmelCase__, strength=0.75, guidance_scale=7.5, num_inference_steps=20, generator=lowerCAmelCase__, output_type='np', )
snake_case_ = output.images
snake_case_ = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 768, 3)
snake_case_ = np.array([0.8043, 0.926, 0.9581, 0.8119, 0.8954, 0.913, 0.7209, 0.7463, 0.7431])
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
| 312
| 0
|
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
__UpperCamelCase = logging.get_logger(__name__)
class UpperCamelCase ( lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE_ = ["pixel_values"]
def __init__( self, lowerCAmelCase__ = True, lowerCAmelCase__ = None, lowerCAmelCase__ = PILImageResampling.BICUBIC, lowerCAmelCase__ = True, lowerCAmelCase__ = None, lowerCAmelCase__ = True, lowerCAmelCase__ = 1 / 255, lowerCAmelCase__ = True, lowerCAmelCase__ = IMAGENET_DEFAULT_MEAN, lowerCAmelCase__ = IMAGENET_DEFAULT_STD, **lowerCAmelCase__, ) -> None:
super().__init__(**lowerCAmelCase__)
snake_case_ = size if size is not None else {'shortest_edge': 224}
snake_case_ = get_size_dict(lowerCAmelCase__, default_to_square=lowerCAmelCase__)
snake_case_ = crop_size if crop_size is not None else {'height': 224, 'width': 224}
snake_case_ = get_size_dict(lowerCAmelCase__, param_name='crop_size')
snake_case_ = do_resize
snake_case_ = size
snake_case_ = resample
snake_case_ = do_center_crop
snake_case_ = crop_size
snake_case_ = do_rescale
snake_case_ = rescale_factor
snake_case_ = do_normalize
snake_case_ = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
snake_case_ = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__ = PILImageResampling.BICUBIC, lowerCAmelCase__ = None, **lowerCAmelCase__, ) -> np.ndarray:
snake_case_ = get_size_dict(lowerCAmelCase__, default_to_square=lowerCAmelCase__)
# size_dict is a dict with either keys "height" and "width" or "shortest_edge"
if "shortest_edge" in size:
snake_case_ = int((256 / 224) * size['shortest_edge'])
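            # e.g. shortest_edge=224 -> int((256 / 224) * 224) == 256: resize so the
            # short side is 256, then center-crop back to 224x224 (the common
            # ImageNet-style evaluation recipe).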
snake_case_ = get_resize_output_image_size(lowerCAmelCase__, size=lowerCAmelCase__, default_to_square=lowerCAmelCase__)
snake_case_ = {'height': output_size[0], 'width': output_size[1]}
if "height" not in size_dict or "width" not in size_dict:
raise ValueError(
f'Size dict must have keys \'height\' and \'width\' or \'shortest_edge\'. Got {size_dict.keys()}')
return resize(
lowerCAmelCase__, size=(size_dict['height'], size_dict['width']), resample=lowerCAmelCase__, data_format=lowerCAmelCase__, **lowerCAmelCase__)
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__ = None, **lowerCAmelCase__, ) -> np.ndarray:
snake_case_ = get_size_dict(lowerCAmelCase__)
if "height" not in size or "width" not in size:
raise ValueError(f'Size dict must have keys \'height\' and \'width\'. Got {size.keys()}')
return center_crop(lowerCAmelCase__, size=(size['height'], size['width']), data_format=lowerCAmelCase__, **lowerCAmelCase__)
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__ = None, **lowerCAmelCase__, ) -> np.ndarray:
return rescale(lowerCAmelCase__, scale=lowerCAmelCase__, data_format=lowerCAmelCase__, **lowerCAmelCase__)
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__ = None, **lowerCAmelCase__, ) -> np.ndarray:
return normalize(lowerCAmelCase__, mean=lowerCAmelCase__, std=lowerCAmelCase__, data_format=lowerCAmelCase__, **lowerCAmelCase__)
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__ = ChannelDimension.FIRST, **lowerCAmelCase__, ) -> BatchFeature:
snake_case_ = do_resize if do_resize is not None else self.do_resize
snake_case_ = resample if resample is not None else self.resample
snake_case_ = do_center_crop if do_center_crop is not None else self.do_center_crop
snake_case_ = do_rescale if do_rescale is not None else self.do_rescale
snake_case_ = rescale_factor if rescale_factor is not None else self.rescale_factor
snake_case_ = do_normalize if do_normalize is not None else self.do_normalize
snake_case_ = image_mean if image_mean is not None else self.image_mean
snake_case_ = image_std if image_std is not None else self.image_std
snake_case_ = size if size is not None else self.size
snake_case_ = get_size_dict(lowerCAmelCase__, default_to_square=lowerCAmelCase__)
snake_case_ = crop_size if crop_size is not None else self.crop_size
snake_case_ = get_size_dict(lowerCAmelCase__, param_name='crop_size')
snake_case_ = make_list_of_images(lowerCAmelCase__)
if not valid_images(lowerCAmelCase__):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.')
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.')
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.')
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.')
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.')
# All transformations expect numpy arrays.
snake_case_ = [to_numpy_array(lowerCAmelCase__) for image in images]
if do_resize:
snake_case_ = [self.resize(lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__) for image in images]
if do_center_crop:
snake_case_ = [self.center_crop(lowerCAmelCase__, lowerCAmelCase__) for image in images]
if do_rescale:
snake_case_ = [self.rescale(lowerCAmelCase__, lowerCAmelCase__) for image in images]
if do_normalize:
snake_case_ = [self.normalize(lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__) for image in images]
snake_case_ = [to_channel_dimension_format(lowerCAmelCase__, lowerCAmelCase__) for image in images]
snake_case_ = {'pixel_values': images}
return BatchFeature(data=lowerCAmelCase__, tensor_type=lowerCAmelCase__)
| 350
|
"""simple docstring"""
import io
import math
from typing import Dict, Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import convert_to_rgb, normalize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
get_image_size,
infer_channel_dimension_format,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_vision_available, logging
from ...utils.import_utils import requires_backends
if is_vision_available():
import textwrap
from PIL import Image, ImageDraw, ImageFont
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
__UpperCamelCase = False
__UpperCamelCase = logging.get_logger(__name__)
__UpperCamelCase = '''ybelkada/fonts'''
def UpperCAmelCase ( ) -> Dict:
if is_torch_available() and not is_torch_greater_or_equal_than_1_11:
raise ImportError(
f'You are using torch=={torch.__version__}, but torch>=1.11.0 is required to use '
'Pix2StructImageProcessor. Please upgrade torch.' )
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> str:
requires_backends(UpperCAmelCase , ['torch'] )
_check_torch_version()
snake_case_ = image_tensor.unsqueeze(0 )
snake_case_ = torch.nn.functional.unfold(UpperCAmelCase , (patch_height, patch_width) , stride=(patch_height, patch_width) )
snake_case_ = patches.reshape(image_tensor.size(0 ) , image_tensor.size(1 ) , UpperCAmelCase , UpperCAmelCase , -1 )
snake_case_ = patches.permute(0 , 4 , 2 , 3 , 1 ).reshape(
image_tensor.size(2 ) // patch_height , image_tensor.size(3 ) // patch_width , image_tensor.size(1 ) * patch_height * patch_width , )
return patches.unsqueeze(0 )
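# Output geometry of the unfold-based patcher above, checked with plain
# arithmetic instead of torch; the helper name is hypothetical.
def _sketch_patch_grid(image_hw, patch_hw, channels=3):
    rows, cols = image_hw[0] // patch_hw[0], image_hw[1] // patch_hw[1]
    depth = channels * patch_hw[0] * patch_hw[1]
    return rows, cols, depth

assert _sketch_patch_grid((32, 32), (16, 16)) == (2, 2, 768)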
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase = 36 , UpperCAmelCase = "black" , UpperCAmelCase = "white" , UpperCAmelCase = 5 , UpperCAmelCase = 5 , UpperCAmelCase = 5 , UpperCAmelCase = 5 , UpperCAmelCase = None , UpperCAmelCase = None , ) -> Image.Image:
requires_backends(UpperCAmelCase , 'vision' )
# Add new lines so that each line is no more than 80 characters.
snake_case_ = textwrap.TextWrapper(width=80 )
snake_case_ = wrapper.wrap(text=UpperCAmelCase )
snake_case_ = '\n'.join(UpperCAmelCase )
if font_bytes is not None and font_path is None:
snake_case_ = io.BytesIO(UpperCAmelCase )
elif font_path is not None:
snake_case_ = font_path
else:
snake_case_ = hf_hub_download(UpperCAmelCase , 'Arial.TTF' )
snake_case_ = ImageFont.truetype(UpperCAmelCase , encoding='UTF-8' , size=UpperCAmelCase )
# Use a temporary canvas to determine the width and height in pixels when
# rendering the text.
snake_case_ = ImageDraw.Draw(Image.new('RGB' , (1, 1) , UpperCAmelCase ) )
snake_case_ , snake_case_ , snake_case_ , snake_case_ = temp_draw.textbbox((0, 0) , UpperCAmelCase , UpperCAmelCase )
# Create the actual image with a bit of padding around the text.
snake_case_ = text_width + left_padding + right_padding
snake_case_ = text_height + top_padding + bottom_padding
snake_case_ = Image.new('RGB' , (image_width, image_height) , UpperCAmelCase )
snake_case_ = ImageDraw.Draw(UpperCAmelCase )
draw.text(xy=(left_padding, top_padding) , text=UpperCAmelCase , fill=UpperCAmelCase , font=UpperCAmelCase )
return image
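# Measuring wrapped text on a 1x1 scratch canvas before allocating the real
# image is the same trick used above. A minimal sketch, assuming Pillow >= 8.0
# is installed (this module requires it); the helper name is hypothetical.
def _sketch_measure_text(text, font):
    from PIL import Image, ImageDraw
    draw = ImageDraw.Draw(Image.new('RGB', (1, 1)))
    left, top, right, bottom = draw.textbbox((0, 0), text, font=font)
    return right - left, bottom - top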
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ) -> Any:
requires_backends(UpperCAmelCase , 'vision' )
# Convert to PIL image if necessary
snake_case_ = to_pil_image(UpperCAmelCase )
snake_case_ = render_text(UpperCAmelCase , **UpperCAmelCase )
snake_case_ = max(header_image.width , image.width )
snake_case_ = int(image.height * (new_width / image.width) )
snake_case_ = int(header_image.height * (new_width / header_image.width) )
snake_case_ = Image.new('RGB' , (new_width, new_height + new_header_height) , 'white' )
new_image.paste(header_image.resize((new_width, new_header_height) ) , (0, 0) )
new_image.paste(image.resize((new_width, new_height) ) , (0, new_header_height) )
# Convert back to the original framework if necessary
snake_case_ = to_numpy_array(UpperCAmelCase )
if infer_channel_dimension_format(UpperCAmelCase ) == ChannelDimension.LAST:
snake_case_ = to_channel_dimension_format(UpperCAmelCase , ChannelDimension.LAST )
return new_image
class UpperCamelCase ( lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE_ = ["flattened_patches"]
def __init__( self, lowerCAmelCase__ = True, lowerCAmelCase__ = True, lowerCAmelCase__ = None, lowerCAmelCase__ = 2048, lowerCAmelCase__ = False, **lowerCAmelCase__, ) -> None:
super().__init__(**lowerCAmelCase__)
snake_case_ = patch_size if patch_size is not None else {'height': 16, 'width': 16}
snake_case_ = do_normalize
snake_case_ = do_convert_rgb
snake_case_ = max_patches
snake_case_ = is_vqa
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, **lowerCAmelCase__) -> np.ndarray:
requires_backends(self.extract_flattened_patches, 'torch')
_check_torch_version()
# convert to torch
snake_case_ = to_channel_dimension_format(lowerCAmelCase__, ChannelDimension.FIRST)
snake_case_ = torch.from_numpy(lowerCAmelCase__)
snake_case_ , snake_case_ = patch_size['height'], patch_size['width']
snake_case_ , snake_case_ = get_image_size(lowerCAmelCase__)
        # maximize scale s.t. the resized image fits within `max_patches` patches of size (patch_height, patch_width)
snake_case_ = math.sqrt(max_patches * (patch_height / image_height) * (patch_width / image_width))
snake_case_ = max(min(math.floor(scale * image_height / patch_height), lowerCAmelCase__), 1)
snake_case_ = max(min(math.floor(scale * image_width / patch_width), lowerCAmelCase__), 1)
snake_case_ = max(num_feasible_rows * patch_height, 1)
snake_case_ = max(num_feasible_cols * patch_width, 1)
snake_case_ = torch.nn.functional.interpolate(
image.unsqueeze(0), size=(resized_height, resized_width), mode='bilinear', align_corners=lowerCAmelCase__, antialias=lowerCAmelCase__, ).squeeze(0)
# [1, rows, columns, patch_height * patch_width * image_channels]
snake_case_ = torch_extract_patches(lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = patches.shape
snake_case_ = patches_shape[1]
snake_case_ = patches_shape[2]
snake_case_ = patches_shape[3]
# [rows * columns, patch_height * patch_width * image_channels]
snake_case_ = patches.reshape([rows * columns, depth])
# [rows * columns, 1]
snake_case_ = torch.arange(lowerCAmelCase__).reshape([rows, 1]).repeat(1, lowerCAmelCase__).reshape([rows * columns, 1])
snake_case_ = torch.arange(lowerCAmelCase__).reshape([1, columns]).repeat(lowerCAmelCase__, 1).reshape([rows * columns, 1])
# Offset by 1 so the ids do not contain zeros, which represent padding.
row_ids += 1
col_ids += 1
# Prepare additional patch features.
# [rows * columns, 1]
snake_case_ = row_ids.to(torch.floataa)
snake_case_ = col_ids.to(torch.floataa)
# [rows * columns, 2 + patch_height * patch_width * image_channels]
snake_case_ = torch.cat([row_ids, col_ids, patches], -1)
# [max_patches, 2 + patch_height * patch_width * image_channels]
snake_case_ = torch.nn.functional.pad(lowerCAmelCase__, [0, 0, 0, max_patches - (rows * columns)]).float()
snake_case_ = to_numpy_array(lowerCAmelCase__)
return result
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None, **lowerCAmelCase__) -> np.ndarray:
if image.dtype == np.uinta:
snake_case_ = image.astype(np.floataa)
# take mean across the whole `image`
snake_case_ = np.mean(lowerCAmelCase__)
snake_case_ = np.std(lowerCAmelCase__)
snake_case_ = max(lowerCAmelCase__, 1.0 / math.sqrt(np.prod(image.shape)))
return normalize(lowerCAmelCase__, mean=lowerCAmelCase__, std=lowerCAmelCase__, **lowerCAmelCase__)
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__ = ChannelDimension.FIRST, **lowerCAmelCase__, ) -> ImageInput:
snake_case_ = do_normalize if do_normalize is not None else self.do_normalize
snake_case_ = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
snake_case_ = patch_size if patch_size is not None else self.patch_size
snake_case_ = max_patches if max_patches is not None else self.max_patches
snake_case_ = self.is_vqa
if kwargs.get('data_format', lowerCAmelCase__) is not None:
            raise ValueError('data_format is not an accepted input as the outputs are flattened patches with a fixed layout.')
snake_case_ = make_list_of_images(lowerCAmelCase__)
if not valid_images(lowerCAmelCase__):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.')
# PIL RGBA images are converted to RGB
if do_convert_rgb:
snake_case_ = [convert_to_rgb(lowerCAmelCase__) for image in images]
# All transformations expect numpy arrays.
snake_case_ = [to_numpy_array(lowerCAmelCase__) for image in images]
if is_vqa:
if header_text is None:
raise ValueError('A header text must be provided for VQA models.')
snake_case_ = kwargs.pop('font_bytes', lowerCAmelCase__)
snake_case_ = kwargs.pop('font_path', lowerCAmelCase__)
if isinstance(lowerCAmelCase__, lowerCAmelCase__):
snake_case_ = [header_text] * len(lowerCAmelCase__)
snake_case_ = [
render_header(lowerCAmelCase__, header_text[i], font_bytes=lowerCAmelCase__, font_path=lowerCAmelCase__)
for i, image in enumerate(lowerCAmelCase__)
]
if do_normalize:
snake_case_ = [self.normalize(image=lowerCAmelCase__) for image in images]
# convert to torch tensor and permute
snake_case_ = [
self.extract_flattened_patches(image=lowerCAmelCase__, max_patches=lowerCAmelCase__, patch_size=lowerCAmelCase__)
for image in images
]
# create attention mask in numpy
snake_case_ = [(image.sum(axis=-1) != 0).astype(np.floataa) for image in images]
snake_case_ = BatchFeature(
data={'flattened_patches': images, 'attention_mask': attention_masks}, tensor_type=lowerCAmelCase__)
return encoded_outputs
| 312
| 0
|
"""simple docstring"""
def UpperCAmelCase ( UpperCAmelCase ) -> list:
snake_case_ = len(UpperCAmelCase )
for _ in range(UpperCAmelCase ):
for i in range(_ % 2 , arr_size - 1 , 2 ):
if arr[i + 1] < arr[i]:
snake_case_ , snake_case_ = arr[i + 1], arr[i]
return arr
if __name__ == "__main__":
__UpperCamelCase = list(range(10, 0, -1))
print(F"""Original: {arr}. Sorted: {odd_even_transposition(arr)}""")
| 351
|
"""simple docstring"""
from math import pi
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> float:
return 2 * pi * radius * (angle / 360)
if __name__ == "__main__":
print(arc_length(90, 10))
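    # Sanity check, reading the call above as angle=90 and radius=10 (the renamed
    # signature obscures the argument order): a 90-degree arc of radius 10 is a
    # quarter circle, 2 * pi * 10 * (90 / 360) == 5 * pi ~= 15.708.
    from math import isclose
    assert isclose(2 * pi * 10 * (90 / 360), 5 * pi)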
| 312
| 0
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__UpperCamelCase = logging.get_logger(__name__)
__UpperCamelCase = {
'''shi-labs/nat-mini-in1k-224''': '''https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json''',
# See all Nat models at https://huggingface.co/models?filter=nat
}
class UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE_ = "nat"
SCREAMING_SNAKE_CASE_ = {
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__( self, lowerCAmelCase__=4, lowerCAmelCase__=3, lowerCAmelCase__=64, lowerCAmelCase__=[3, 4, 6, 5], lowerCAmelCase__=[2, 4, 8, 16], lowerCAmelCase__=7, lowerCAmelCase__=3.0, lowerCAmelCase__=True, lowerCAmelCase__=0.0, lowerCAmelCase__=0.0, lowerCAmelCase__=0.1, lowerCAmelCase__="gelu", lowerCAmelCase__=0.02, lowerCAmelCase__=1e-5, lowerCAmelCase__=0.0, lowerCAmelCase__=None, lowerCAmelCase__=None, **lowerCAmelCase__, ) -> Optional[Any]:
super().__init__(**lowerCAmelCase__)
snake_case_ = patch_size
snake_case_ = num_channels
snake_case_ = embed_dim
snake_case_ = depths
snake_case_ = len(lowerCAmelCase__)
snake_case_ = num_heads
snake_case_ = kernel_size
snake_case_ = mlp_ratio
snake_case_ = qkv_bias
snake_case_ = hidden_dropout_prob
snake_case_ = attention_probs_dropout_prob
snake_case_ = drop_path_rate
snake_case_ = hidden_act
snake_case_ = layer_norm_eps
snake_case_ = initializer_range
# we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
snake_case_ = int(embed_dim * 2 ** (len(lowerCAmelCase__) - 1))
snake_case_ = layer_scale_init_value
snake_case_ = ['stem'] + [f'stage{idx}' for idx in range(1, len(lowerCAmelCase__) + 1)]
snake_case_ , snake_case_ = get_aligned_output_features_output_indices(
out_features=lowerCAmelCase__, out_indices=lowerCAmelCase__, stage_names=self.stage_names)
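# Worked example (added, standalone arithmetic): the hidden_size derived above
# is the channel count after the last stage, embed_dim * 2 ** (num_levels - 1);
# with the defaults embed_dim=64 and four depth entries that is 64 * 8 = 512.
assert int(64 * 2 ** (4 - 1)) == 512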
| 352
|
"""simple docstring"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging
__UpperCamelCase = logging.get_logger(__name__)
__UpperCamelCase = {
'''CarlCochet/trajectory-transformer-halfcheetah-medium-v2''': (
'''https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json'''
),
# See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer
}
class UpperCamelCase ( lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE_ = "trajectory_transformer"
SCREAMING_SNAKE_CASE_ = ["past_key_values"]
SCREAMING_SNAKE_CASE_ = {
"hidden_size": "n_embd",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self, lowerCAmelCase__=100, lowerCAmelCase__=5, lowerCAmelCase__=1, lowerCAmelCase__=1, lowerCAmelCase__=249, lowerCAmelCase__=6, lowerCAmelCase__=17, lowerCAmelCase__=25, lowerCAmelCase__=4, lowerCAmelCase__=4, lowerCAmelCase__=128, lowerCAmelCase__=0.1, lowerCAmelCase__=0.1, lowerCAmelCase__=0.1, lowerCAmelCase__=0.0006, lowerCAmelCase__=512, lowerCAmelCase__=0.02, lowerCAmelCase__=1e-12, lowerCAmelCase__=1, lowerCAmelCase__=True, lowerCAmelCase__=1, lowerCAmelCase__=5_0256, lowerCAmelCase__=5_0256, **lowerCAmelCase__, ) -> Optional[Any]:
snake_case_ = vocab_size
snake_case_ = action_weight
snake_case_ = reward_weight
snake_case_ = value_weight
snake_case_ = max_position_embeddings
snake_case_ = block_size
snake_case_ = action_dim
snake_case_ = observation_dim
snake_case_ = transition_dim
snake_case_ = learning_rate
snake_case_ = n_layer
snake_case_ = n_head
snake_case_ = n_embd
snake_case_ = embd_pdrop
snake_case_ = attn_pdrop
snake_case_ = resid_pdrop
snake_case_ = initializer_range
snake_case_ = layer_norm_eps
snake_case_ = kaiming_initializer_range
snake_case_ = use_cache
super().__init__(pad_token_id=lowerCAmelCase__, bos_token_id=lowerCAmelCase__, eos_token_id=lowerCAmelCase__, **lowerCAmelCase__)
| 312
| 0
|
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCamelCase = logging.get_logger()
@dataclass
class UpperCamelCase :
SCREAMING_SNAKE_CASE_ = 4_2
SCREAMING_SNAKE_CASE_ = field(default_factory=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ = field(default_factory=lowerCAmelCase__ )
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__) -> str:
snake_case_ = len(list(m.modules())) == 1 or isinstance(lowerCAmelCase__, nn.Convad) or isinstance(lowerCAmelCase__, nn.BatchNormad)
if has_not_submodules:
self.traced.append(lowerCAmelCase__)
def __call__( self, lowerCAmelCase__) -> Union[str, Any]:
for m in self.module.modules():
self.handles.append(m.register_forward_hook(self._forward_hook))
self.module(lowerCAmelCase__)
[x.remove() for x in self.handles]
return self
@property
def a_ ( self) -> Any:
# check the len of the state_dict keys to see if we have learnable params
return list(filter(lambda lowerCAmelCase__: len(list(x.state_dict().keys())) > 0, self.traced))
@dataclass
class UpperCamelCase :
SCREAMING_SNAKE_CASE_ = 4_2
SCREAMING_SNAKE_CASE_ = 4_2
SCREAMING_SNAKE_CASE_ = 0
SCREAMING_SNAKE_CASE_ = field(default_factory=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ = field(default_factory=lowerCAmelCase__ )
def __call__( self, lowerCAmelCase__) -> Optional[int]:
snake_case_ = Tracker(self.dest)(lowerCAmelCase__).parametrized
snake_case_ = Tracker(self.src)(lowerCAmelCase__).parametrized
snake_case_ = list(filter(lambda lowerCAmelCase__: type(lowerCAmelCase__) not in self.src_skip, lowerCAmelCase__))
snake_case_ = list(filter(lambda lowerCAmelCase__: type(lowerCAmelCase__) not in self.dest_skip, lowerCAmelCase__))
if len(lowerCAmelCase__) != len(lowerCAmelCase__):
raise Exception(
f'Numbers of operations are different. Source module has {len(lowerCAmelCase__)} operations while'
f' destination module has {len(lowerCAmelCase__)}.')
for dest_m, src_m in zip(lowerCAmelCase__, lowerCAmelCase__):
dest_m.load_state_dict(src_m.state_dict())
if self.verbose == 1:
                print(f'Transferred from={src_m} to={dest_m}')
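# Standalone sketch (added, illustrative only) of the idea behind the
# ModuleTransfer class above: pair up matching leaf modules of two networks
# and copy weights position by position via load_state_dict.
import torch
import torch.nn as nn

_src, _dest = nn.Linear(4, 4), nn.Linear(4, 4)
_dest.load_state_dict(_src.state_dict())
assert all(
    torch.equal(a, b)
    for a, b in zip(_src.state_dict().values(), _dest.state_dict().values())
)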
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = True ) -> List[str]:
print(f'Converting {name}...' )
with torch.no_grad():
snake_case_ = timm.create_model(UpperCAmelCase , pretrained=UpperCAmelCase ).eval()
snake_case_ = ResNetForImageClassification(UpperCAmelCase ).eval()
snake_case_ = ModuleTransfer(src=UpperCAmelCase , dest=UpperCAmelCase )
snake_case_ = torch.randn((1, 3, 224, 224) )
module_transfer(UpperCAmelCase )
assert torch.allclose(from_model(UpperCAmelCase ) , our_model(UpperCAmelCase ).logits ), "The model logits don't match the original one."
snake_case_ = f'resnet{"-".join(name.split("resnet" ) )}'
print(UpperCAmelCase )
if push_to_hub:
our_model.push_to_hub(
repo_path_or_name=save_directory / checkpoint_name , commit_message='Add model' , use_temp_dir=UpperCAmelCase , )
# we can use the convnext one
snake_case_ = AutoImageProcessor.from_pretrained('facebook/convnext-base-224-22k-1k' )
image_processor.push_to_hub(
repo_path_or_name=save_directory / checkpoint_name , commit_message='Add image processor' , use_temp_dir=UpperCAmelCase , )
print(f'Pushed {checkpoint_name}' )
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase = None , UpperCAmelCase = True ) -> Tuple:
snake_case_ = 'imagenet-1k-id2label.json'
snake_case_ = 1000
snake_case_ = (1, num_labels)
snake_case_ = 'huggingface/label-files'
snake_case_ = num_labels
snake_case_ = json.load(open(hf_hub_download(UpperCAmelCase , UpperCAmelCase , repo_type='dataset' ) , 'r' ) )
snake_case_ = {int(UpperCAmelCase ): v for k, v in idalabel.items()}
snake_case_ = idalabel
snake_case_ = {v: k for k, v in idalabel.items()}
snake_case_ = partial(UpperCAmelCase , num_labels=UpperCAmelCase , idalabel=UpperCAmelCase , labelaid=UpperCAmelCase )
snake_case_ = {
'resnet18': ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[64, 128, 256, 512] , layer_type='basic' ),
'resnet26': ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='bottleneck' ),
'resnet34': ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[64, 128, 256, 512] , layer_type='basic' ),
'resnet50': ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='bottleneck' ),
'resnet101': ImageNetPreTrainedConfig(
depths=[3, 4, 23, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='bottleneck' ),
'resnet152': ImageNetPreTrainedConfig(
depths=[3, 8, 36, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='bottleneck' ),
}
if model_name:
convert_weight_and_push(UpperCAmelCase , names_to_config[model_name] , UpperCAmelCase , UpperCAmelCase )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
return config, expected_shape
if __name__ == "__main__":
__UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default=None,
type=str,
help=(
'''The name of the model you wish to convert, it must be one of the supported resnet* architecture,'''
''' currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=Path,
required=True,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''',
default=True,
type=bool,
required=False,
help='''If True, push model and image processor to the hub.''',
)
__UpperCamelCase = parser.parse_args()
__UpperCamelCase = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 353
|
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class UpperCamelCase ( metaclass=lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE_ = ["keras_nlp"]
def __init__( self, *lowerCAmelCase__, **lowerCAmelCase__) -> int:
requires_backends(self, ['keras_nlp'])
| 312
| 0
|
"""simple docstring"""
import unittest
from typing import Tuple
import torch
from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch
@require_torch
class UpperCamelCase :
@property
def a_ ( self) -> Optional[Any]:
return self.get_dummy_input()
@property
def a_ ( self) -> Tuple:
if self.block_type == "down":
return (4, 32, 16, 16)
elif self.block_type == "mid":
return (4, 32, 32, 32)
elif self.block_type == "up":
return (4, 32, 64, 64)
raise ValueError(f'\'{self.block_type}\' is not a supported block_type. Set it to \'up\', \'mid\', or \'down\'.')
def a_ ( self, lowerCAmelCase__=True, lowerCAmelCase__=False, lowerCAmelCase__=False, lowerCAmelCase__=False, ) -> Dict:
snake_case_ = 4
snake_case_ = 32
snake_case_ = (32, 32)
snake_case_ = torch.manual_seed(0)
snake_case_ = torch.device(lowerCAmelCase__)
snake_case_ = (batch_size, num_channels) + sizes
snake_case_ = randn_tensor(lowerCAmelCase__, generator=lowerCAmelCase__, device=lowerCAmelCase__)
snake_case_ = {'hidden_states': hidden_states}
if include_temb:
snake_case_ = 128
snake_case_ = randn_tensor((batch_size, temb_channels), generator=lowerCAmelCase__, device=lowerCAmelCase__)
if include_res_hidden_states_tuple:
snake_case_ = torch.manual_seed(1)
snake_case_ = (randn_tensor(lowerCAmelCase__, generator=lowerCAmelCase__, device=lowerCAmelCase__),)
if include_encoder_hidden_states:
snake_case_ = floats_tensor((batch_size, 32, 32)).to(lowerCAmelCase__)
if include_skip_sample:
snake_case_ = randn_tensor(((batch_size, 3) + sizes), generator=lowerCAmelCase__, device=lowerCAmelCase__)
return dummy_input
def a_ ( self) -> Any:
snake_case_ = {
'in_channels': 32,
'out_channels': 32,
'temb_channels': 128,
}
if self.block_type == "up":
snake_case_ = 32
if self.block_type == "mid":
init_dict.pop('out_channels')
snake_case_ = self.dummy_input
return init_dict, inputs_dict
def a_ ( self, lowerCAmelCase__) -> List[str]:
snake_case_ , snake_case_ = self.prepare_init_args_and_inputs_for_common()
snake_case_ = self.block_class(**lowerCAmelCase__)
unet_block.to(lowerCAmelCase__)
unet_block.eval()
with torch.no_grad():
snake_case_ = unet_block(**lowerCAmelCase__)
if isinstance(lowerCAmelCase__, lowerCAmelCase__):
snake_case_ = output[0]
self.assertEqual(output.shape, self.output_shape)
snake_case_ = output[0, -1, -3:, -3:]
snake_case_ = torch.tensor(lowerCAmelCase__).to(lowerCAmelCase__)
assert torch_all_close(output_slice.flatten(), lowerCAmelCase__, atol=5e-3)
@unittest.skipIf(torch_device == 'mps', 'Training is not supported in mps')
def a_ ( self) -> List[str]:
snake_case_ , snake_case_ = self.prepare_init_args_and_inputs_for_common()
snake_case_ = self.block_class(**lowerCAmelCase__)
model.to(lowerCAmelCase__)
model.train()
snake_case_ = model(**lowerCAmelCase__)
if isinstance(lowerCAmelCase__, lowerCAmelCase__):
snake_case_ = output[0]
snake_case_ = torch.device(lowerCAmelCase__)
snake_case_ = randn_tensor(output.shape, device=lowerCAmelCase__)
snake_case_ = torch.nn.functional.mse_loss(lowerCAmelCase__, lowerCAmelCase__)
loss.backward()
| 354
|
"""simple docstring"""
import os
import numpy
import onnx
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> List[str]:
snake_case_ = a.name
snake_case_ = b.name
snake_case_ = ''
snake_case_ = ''
snake_case_ = a == b
snake_case_ = name_a
snake_case_ = name_b
return res
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> int:
for i, input_name in enumerate(node_proto.input ):
if input_name == name:
node_proto.input.insert(UpperCAmelCase , UpperCAmelCase )
node_proto.input.pop(i + 1 )
if node_proto.op_type == "If":
_graph_replace_input_with(node_proto.attribute[0].g , UpperCAmelCase , UpperCAmelCase )
_graph_replace_input_with(node_proto.attribute[1].g , UpperCAmelCase , UpperCAmelCase )
if node_proto.op_type == "Loop":
_graph_replace_input_with(node_proto.attribute[0].g , UpperCAmelCase , UpperCAmelCase )
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Union[str, Any]:
for n in graph_proto.node:
_node_replace_input_with(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Any:
snake_case_ = list(model.graph.initializer )
snake_case_ = list(model_without_ext.graph.initializer )
for i, ref_i in ind_to_replace:
assert inits_with_data[i].name == inits[i].name
assert inits_with_data[ref_i].name == inits[ref_i].name
assert i > ref_i
snake_case_ = inits[i].name
snake_case_ = inits[ref_i].name
model_without_ext.graph.initializer.remove(inits[i] )
# for n in model.graph.node:
_graph_replace_input_with(model_without_ext.graph , UpperCAmelCase , UpperCAmelCase )
def UpperCAmelCase ( UpperCAmelCase ) -> Optional[Any]:
snake_case_ = os.path.dirname(UpperCAmelCase )
snake_case_ = os.path.basename(UpperCAmelCase )
snake_case_ = onnx.load(os.path.join(UpperCAmelCase , UpperCAmelCase ) )
snake_case_ = list(model.graph.initializer )
snake_case_ = set()
snake_case_ = {}
snake_case_ = []
snake_case_ = 0
for i in range(len(UpperCAmelCase ) ):
if i in dup_set:
continue
for j in range(i + 1 , len(UpperCAmelCase ) ):
if j in dup_set:
continue
if _is_equal_tensor_proto(inits[i] , inits[j] ):
dup_set.add(UpperCAmelCase )
dup_set.add(UpperCAmelCase )
snake_case_ = inits[j].data_type
snake_case_ = numpy.prod(inits[j].dims )
if dtype == 1:
mem_size *= 4
elif dtype == 6:
mem_size *= 4
elif dtype == 7 or dtype == 11:
mem_size *= 8
else:
print('unexpected data type: ' , UpperCAmelCase )
total_reduced_size += mem_size
snake_case_ = inits[i].name
snake_case_ = inits[j].name
if name_i in dup_map:
dup_map[name_i].append(UpperCAmelCase )
else:
snake_case_ = [name_j]
ind_to_replace.append((j, i) )
print('total reduced size: ' , total_reduced_size / 1024 / 1024 / 1024 , 'GB' )
snake_case_ = sorted(UpperCAmelCase )
_remove_dup_initializers_from_model(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
snake_case_ = 'optimized_' + model_file_name
snake_case_ = os.path.join(UpperCAmelCase , UpperCAmelCase )
onnx.save(UpperCAmelCase , UpperCAmelCase )
return new_model
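# Minimal sketch (added, illustrative) of the duplicate-initializer detection
# performed above: keep the first copy of each equal tensor and remember which
# names should be remapped onto it.
import numpy as np

_tensors = {'a': np.ones(3), 'b': np.ones(3), 'c': np.zeros(3)}
_canonical = {}
_replace_with = {}
for _name, _arr in _tensors.items():
    for _kept_name, _kept in _canonical.items():
        if np.array_equal(_arr, _kept):
            _replace_with[_name] = _kept_name
            break
    else:
        _canonical[_name] = _arr
assert _replace_with == {'b': 'a'}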
| 312
| 0
|
"""simple docstring"""
def solution(length: int = 50) -> int:
    different_colour_ways_number = [[0] * 3 for _ in range(length + 1)]
for row_length in range(length + 1 ):
for tile_length in range(2 , 5 ):
for tile_start in range(row_length - tile_length + 1 ):
different_colour_ways_number[row_length][tile_length - 2] += (
different_colour_ways_number[row_length - tile_start - tile_length][
tile_length - 2
]
+ 1
)
return sum(different_colour_ways_number[length] )
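# Known small case (added, from the Project Euler 116 statement): a row of
# five units admits 7 + 3 + 2 = 12 tilings across the three tile lengths.
assert solution(5) == 12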
if __name__ == "__main__":
print(F"""{solution() = }""")
| 355
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_barthez import BarthezTokenizer
else:
__UpperCamelCase = None
__UpperCamelCase = logging.get_logger(__name__)
__UpperCamelCase = {'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''}
__UpperCamelCase = {
'''vocab_file''': {
'''moussaKam/mbarthez''': '''https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model''',
'''moussaKam/barthez''': '''https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model''',
'''moussaKam/barthez-orangesum-title''': (
'''https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model'''
),
},
'''tokenizer_file''': {
'''moussaKam/mbarthez''': '''https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json''',
'''moussaKam/barthez''': '''https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json''',
'''moussaKam/barthez-orangesum-title''': (
'''https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json'''
),
},
}
__UpperCamelCase = {
'''moussaKam/mbarthez''': 1024,
'''moussaKam/barthez''': 1024,
'''moussaKam/barthez-orangesum-title''': 1024,
}
__UpperCamelCase = '''▁'''
class UpperCamelCase ( lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE_ = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE_ = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE_ = ["input_ids", "attention_mask"]
SCREAMING_SNAKE_CASE_ = BarthezTokenizer
def __init__( self, lowerCAmelCase__=None, lowerCAmelCase__=None, lowerCAmelCase__="<s>", lowerCAmelCase__="</s>", lowerCAmelCase__="</s>", lowerCAmelCase__="<s>", lowerCAmelCase__="<unk>", lowerCAmelCase__="<pad>", lowerCAmelCase__="<mask>", **lowerCAmelCase__, ) -> List[str]:
# Mask token behave like a normal word, i.e. include the space before it
snake_case_ = AddedToken(lowerCAmelCase__, lstrip=lowerCAmelCase__, rstrip=lowerCAmelCase__) if isinstance(lowerCAmelCase__, lowerCAmelCase__) else mask_token
super().__init__(
lowerCAmelCase__, tokenizer_file=lowerCAmelCase__, bos_token=lowerCAmelCase__, eos_token=lowerCAmelCase__, unk_token=lowerCAmelCase__, sep_token=lowerCAmelCase__, cls_token=lowerCAmelCase__, pad_token=lowerCAmelCase__, mask_token=lowerCAmelCase__, **lowerCAmelCase__, )
snake_case_ = vocab_file
snake_case_ = False if not self.vocab_file else True
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
snake_case_ = [self.cls_token_id]
snake_case_ = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None) -> List[int]:
snake_case_ = [self.sep_token_id]
snake_case_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None) -> Tuple[str]:
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.')
if not os.path.isdir(lowerCAmelCase__):
logger.error(f'Vocabulary path ({save_directory}) should be a directory')
return
snake_case_ = os.path.join(
lowerCAmelCase__, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
if os.path.abspath(self.vocab_file) != os.path.abspath(lowerCAmelCase__):
copyfile(self.vocab_file, lowerCAmelCase__)
return (out_vocab_file,)
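# Illustrative expectation (added, placeholder token ids): for a sequence pair
# the layout built above is <s> A </s></s> B </s>, with a doubled separator
# between the two segments.
_cls_id, _sep_id = 0, 2
_ids_a, _ids_b = [10, 11], [20]
assert [_cls_id] + _ids_a + [_sep_id, _sep_id] + _ids_b + [_sep_id] == [0, 10, 11, 2, 2, 20, 2]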
| 312
| 0
|
"""simple docstring"""
from __future__ import annotations
def shear_stress(stress: float, tangential_force: float, area: float) -> tuple[str, float]:
if (stress, tangential_force, area).count(0 ) != 1:
raise ValueError('You cannot supply more or less than 2 values' )
elif stress < 0:
raise ValueError('Stress cannot be negative' )
elif tangential_force < 0:
raise ValueError('Tangential Force cannot be negative' )
elif area < 0:
raise ValueError('Area cannot be negative' )
elif stress == 0:
return (
"stress",
tangential_force / area,
)
elif tangential_force == 0:
return (
"tangential_force",
stress * area,
)
else:
return (
"area",
tangential_force / stress,
)
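# Worked examples (added): exactly one of the three quantities must be zero,
# and the function solves for it from tau = F / A.
assert shear_stress(stress=25, tangential_force=100, area=0) == ('area', 4.0)
assert shear_stress(stress=0, tangential_force=1600, area=200) == ('stress', 8.0)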
if __name__ == "__main__":
import doctest
doctest.testmod()
| 356
|
"""simple docstring"""
import functools
def minimum_tickets_cost(days: list[int], costs: list[int]) -> int:
    # Validation
    if not isinstance(days, list) or not all(isinstance(day, int) for day in days):
        raise ValueError('The parameter days should be a list of integers')
    if len(costs) != 3 or not all(isinstance(cost, int) for cost in costs):
        raise ValueError('The parameter costs should be a list of three integers')
    if len(days) == 0:
        return 0
    if min(days) <= 0:
        raise ValueError('All days elements should be greater than 0')
    if max(days) >= 366:
        raise ValueError('All days elements should be less than 366')
    days_set = set(days)
@functools.cache
    def dynamic_programming(index: int) -> int:
if index > 365:
return 0
if index not in days_set:
return dynamic_programming(index + 1 )
return min(
costs[0] + dynamic_programming(index + 1 ) , costs[1] + dynamic_programming(index + 7 ) , costs[2] + dynamic_programming(index + 30 ) , )
return dynamic_programming(1 )
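# Worked example (added, the classic LeetCode 983 case): travelling on days
# [1, 4, 6, 7, 8, 20] with pass costs [2, 7, 15] is cheapest at 11
# (a 1-day pass, a 7-day pass, then another 1-day pass).
assert minimum_tickets_cost([1, 4, 6, 7, 8, 20], [2, 7, 15]) == 11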
if __name__ == "__main__":
import doctest
doctest.testmod()
| 312
| 0
|
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
def UpperCAmelCase ( UpperCAmelCase ) -> Dict:
# vision encoder
if "img_encoder.pos_embed" in name:
snake_case_ = name.replace('img_encoder.pos_embed' , 'vision_model.embeddings.position_embeddings' )
if "img_encoder.patch_embed.proj" in name:
snake_case_ = name.replace('img_encoder.patch_embed.proj' , 'vision_model.embeddings.patch_embeddings.projection' )
if "img_encoder.patch_embed.norm" in name:
snake_case_ = name.replace('img_encoder.patch_embed.norm' , 'vision_model.embeddings.layernorm' )
if "img_encoder.layers" in name:
snake_case_ = name.replace('img_encoder.layers' , 'vision_model.encoder.stages' )
if "blocks" in name and "res" not in name:
snake_case_ = name.replace('blocks' , 'layers' )
if "attn" in name and "pre_assign" not in name:
snake_case_ = name.replace('attn' , 'self_attn' )
if "proj" in name and "self_attn" in name and "text" not in name:
snake_case_ = name.replace('proj' , 'out_proj' )
if "pre_assign_attn.attn.proj" in name:
snake_case_ = name.replace('pre_assign_attn.attn.proj' , 'pre_assign_attn.attn.out_proj' )
if "norm1" in name:
snake_case_ = name.replace('norm1' , 'layer_norm1' )
if "norm2" in name and "pre_assign" not in name:
snake_case_ = name.replace('norm2' , 'layer_norm2' )
if "img_encoder.norm" in name:
snake_case_ = name.replace('img_encoder.norm' , 'vision_model.layernorm' )
# text encoder
if "text_encoder.token_embedding" in name:
snake_case_ = name.replace('text_encoder.token_embedding' , 'text_model.embeddings.token_embedding' )
if "text_encoder.positional_embedding" in name:
snake_case_ = name.replace('text_encoder.positional_embedding' , 'text_model.embeddings.position_embedding.weight' )
if "text_encoder.transformer.resblocks." in name:
snake_case_ = name.replace('text_encoder.transformer.resblocks.' , 'text_model.encoder.layers.' )
if "ln_1" in name:
snake_case_ = name.replace('ln_1' , 'layer_norm1' )
if "ln_2" in name:
snake_case_ = name.replace('ln_2' , 'layer_norm2' )
if "c_fc" in name:
snake_case_ = name.replace('c_fc' , 'fc1' )
if "c_proj" in name:
snake_case_ = name.replace('c_proj' , 'fc2' )
if "text_encoder" in name:
snake_case_ = name.replace('text_encoder' , 'text_model' )
if "ln_final" in name:
snake_case_ = name.replace('ln_final' , 'final_layer_norm' )
# projection layers
if "img_projector.linear_hidden." in name:
snake_case_ = name.replace('img_projector.linear_hidden.' , 'visual_projection.' )
if "img_projector.linear_out." in name:
snake_case_ = name.replace('img_projector.linear_out.' , 'visual_projection.3.' )
if "text_projector.linear_hidden" in name:
snake_case_ = name.replace('text_projector.linear_hidden' , 'text_projection' )
if "text_projector.linear_out" in name:
snake_case_ = name.replace('text_projector.linear_out' , 'text_projection.3' )
return name
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> Optional[Any]:
for key in orig_state_dict.copy().keys():
snake_case_ = orig_state_dict.pop(UpperCAmelCase )
if "qkv" in key:
# weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
snake_case_ = key.split('.' )
snake_case_ , snake_case_ = int(key_split[2] ), int(key_split[4] )
snake_case_ = config.vision_config.hidden_size
if "weight" in key:
snake_case_ = val[:dim, :]
snake_case_ = val[dim : dim * 2, :]
snake_case_ = val[-dim:, :]
else:
snake_case_ = val[:dim]
snake_case_ = val[dim : dim * 2]
snake_case_ = val[-dim:]
elif "in_proj" in key:
# weights and biases of the key, value and query projections of text encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
snake_case_ = key.split('.' )
snake_case_ = int(key_split[3] )
snake_case_ = config.text_config.hidden_size
if "weight" in key:
snake_case_ = val[:dim, :]
snake_case_ = val[
dim : dim * 2, :
]
snake_case_ = val[-dim:, :]
else:
snake_case_ = val[:dim]
snake_case_ = val[dim : dim * 2]
snake_case_ = val[-dim:]
else:
snake_case_ = rename_key(UpperCAmelCase )
# squeeze if necessary
if (
"text_projection.0" in new_name
or "text_projection.3" in new_name
or "visual_projection.0" in new_name
or "visual_projection.3" in new_name
):
snake_case_ = val.squeeze_()
else:
snake_case_ = val
return orig_state_dict
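# Standalone sketch (added, illustrative) of the qkv split performed above: a
# fused (3 * dim, dim) projection weight is cut into equal q, k, v blocks
# along the first dimension.
import torch

_dim = 4
_fused = torch.arange(3 * _dim * _dim, dtype=torch.float32).reshape(3 * _dim, _dim)
_q, _k, _v = _fused[:_dim, :], _fused[_dim : _dim * 2, :], _fused[-_dim:, :]
assert torch.equal(torch.cat([_q, _k, _v]), _fused)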
def UpperCAmelCase ( ) -> Any:
snake_case_ = 'http://images.cocodataset.org/val2017/000000039769.jpg'
snake_case_ = Image.open(requests.get(UpperCAmelCase , stream=UpperCAmelCase ).raw )
return im
@torch.no_grad()
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase="groupvit-gcc-yfcc" , UpperCAmelCase=False ) -> int:
snake_case_ = GroupViTConfig()
snake_case_ = GroupViTModel(UpperCAmelCase ).eval()
snake_case_ = torch.load(UpperCAmelCase , map_location='cpu' )['model']
snake_case_ = convert_state_dict(UpperCAmelCase , UpperCAmelCase )
snake_case_ , snake_case_ = model.load_state_dict(UpperCAmelCase , strict=UpperCAmelCase )
assert missing_keys == ["text_model.embeddings.position_ids"]
assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(UpperCAmelCase ) == 0)
# verify result
snake_case_ = CLIPProcessor.from_pretrained('openai/clip-vit-base-patch32' )
snake_case_ = prepare_img()
snake_case_ = processor(text=['a photo of a cat', 'a photo of a dog'] , images=UpperCAmelCase , padding=UpperCAmelCase , return_tensors='pt' )
with torch.no_grad():
snake_case_ = model(**UpperCAmelCase )
if model_name == "groupvit-gcc-yfcc":
snake_case_ = torch.tensor([[13.3_523, 6.3_629]] )
elif model_name == "groupvit-gcc-redcaps":
snake_case_ = torch.tensor([[16.1_873, 8.6_230]] )
else:
raise ValueError(f'Model name {model_name} not supported.' )
assert torch.allclose(outputs.logits_per_image , UpperCAmelCase , atol=1e-3 )
processor.save_pretrained(UpperCAmelCase )
model.save_pretrained(UpperCAmelCase )
print('Successfully saved processor and model to' , UpperCAmelCase )
if push_to_hub:
print('Pushing to the hub...' )
processor.push_to_hub(UpperCAmelCase , organization='nielsr' )
model.push_to_hub(UpperCAmelCase , organization='nielsr' )
if __name__ == "__main__":
__UpperCamelCase = argparse.ArgumentParser()
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to dump the processor and PyTorch model.'''
)
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to GroupViT checkpoint''')
parser.add_argument(
'''--model_name''',
    default='''groupvit-gcc-yfcc''',
type=str,
help='''Name of the model. Expecting either \'groupvit-gcc-yfcc\' or \'groupvit-gcc-redcaps\'''',
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.''',
)
__UpperCamelCase = parser.parse_args()
convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 357
|
"""simple docstring"""
import copy
import re
class UpperCamelCase :
SCREAMING_SNAKE_CASE_ = "hp"
SCREAMING_SNAKE_CASE_ = {}
SCREAMING_SNAKE_CASE_ = None
@classmethod
def a_ ( cls, lowerCAmelCase__, lowerCAmelCase__) -> Tuple:
snake_case_ = prefix
snake_case_ = defaults
cls.build_naming_info()
@staticmethod
def a_ ( lowerCAmelCase__, lowerCAmelCase__) -> Optional[Any]:
if len(lowerCAmelCase__) == 0:
return ""
snake_case_ = None
if any(char.isdigit() for char in word):
raise Exception(f'Parameters should not contain numbers: \'{word}\' contains a number')
if word in info["short_word"]:
return info["short_word"][word]
for prefix_len in range(1, len(lowerCAmelCase__) + 1):
snake_case_ = word[:prefix_len]
if prefix in info["reverse_short_word"]:
continue
else:
snake_case_ = prefix
break
if short_word is None:
# Paranoid fallback
def int_to_alphabetic(lowerCAmelCase__):
snake_case_ = ''
while integer != 0:
snake_case_ = chr(ord('A') + integer % 10) + s
integer //= 10
return s
snake_case_ = 0
while True:
snake_case_ = word + '#' + int_to_alphabetic(lowerCAmelCase__)
if sword in info["reverse_short_word"]:
continue
else:
snake_case_ = sword
break
snake_case_ = short_word
snake_case_ = word
return short_word
@staticmethod
def a_ ( lowerCAmelCase__, lowerCAmelCase__) -> Dict:
snake_case_ = param_name.split('_')
snake_case_ = [TrialShortNamer.shortname_for_word(lowerCAmelCase__, lowerCAmelCase__) for word in words]
# We try to create a separatorless short name, but if there is a collision we have to fallback
# to a separated short name
snake_case_ = ['', '_']
for separator in separators:
snake_case_ = separator.join(lowerCAmelCase__)
if shortname not in info["reverse_short_param"]:
snake_case_ = shortname
snake_case_ = param_name
return shortname
return param_name
@staticmethod
def a_ ( lowerCAmelCase__, lowerCAmelCase__) -> List[Any]:
snake_case_ = TrialShortNamer.shortname_for_key(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = short_name
snake_case_ = param_name
@classmethod
def a_ ( cls) -> List[str]:
if cls.NAMING_INFO is not None:
return
snake_case_ = {
'short_word': {},
'reverse_short_word': {},
'short_param': {},
'reverse_short_param': {},
}
snake_case_ = list(cls.DEFAULTS.keys())
for k in field_keys:
cls.add_new_param_name(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = info
@classmethod
def a_ ( cls, lowerCAmelCase__) -> List[Any]:
cls.build_naming_info()
assert cls.PREFIX is not None
snake_case_ = [copy.copy(cls.PREFIX)]
for k, v in params.items():
if k not in cls.DEFAULTS:
raise Exception(f'You should provide a default value for the param name {k} with value {v}')
if v == cls.DEFAULTS[k]:
# The default value is not added to the name
continue
snake_case_ = cls.NAMING_INFO['short_param'][k]
if isinstance(lowerCAmelCase__, lowerCAmelCase__):
snake_case_ = 1 if v else 0
snake_case_ = '' if isinstance(lowerCAmelCase__, (int, float)) else '-'
snake_case_ = f'{key}{sep}{v}'
name.append(lowerCAmelCase__)
return "_".join(lowerCAmelCase__)
@classmethod
def a_ ( cls, lowerCAmelCase__) -> Optional[Any]:
snake_case_ = repr[len(cls.PREFIX) + 1 :]
if repr == "":
snake_case_ = []
else:
snake_case_ = repr.split('_')
snake_case_ = {}
for value in values:
if "-" in value:
snake_case_ , snake_case_ = value.split('-')
else:
snake_case_ = re.sub('[0-9.]', '', lowerCAmelCase__)
snake_case_ = float(re.sub('[^0-9.]', '', lowerCAmelCase__))
snake_case_ = cls.NAMING_INFO['reverse_short_param'][p_k]
snake_case_ = p_v
for k in cls.DEFAULTS:
if k not in parameters:
snake_case_ = cls.DEFAULTS[k]
return parameters
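# Minimal illustration (added) of the prefix-shortening idea in
# shortname_for_word above: each word gets the shortest prefix that has not
# been claimed yet.
def _shortest_unused_prefix(word, used):
    for n in range(1, len(word) + 1):
        if word[:n] not in used:
            return word[:n]
    return word

_used = set()
for _w in ['learning', 'layers', 'rate']:
    _used.add(_shortest_unused_prefix(_w, _used))
assert _used == {'l', 'la', 'r'}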
| 312
| 0
|
"""simple docstring"""
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionImgaImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class UpperCamelCase ( lowerCAmelCase__ , unittest.TestCase ):
SCREAMING_SNAKE_CASE_ = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"
def a_ ( self, lowerCAmelCase__=0) -> List[Any]:
snake_case_ = floats_tensor((1, 3, 128, 128), rng=random.Random(lowerCAmelCase__))
snake_case_ = np.random.RandomState(lowerCAmelCase__)
snake_case_ = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'generator': generator,
'num_inference_steps': 3,
'strength': 0.75,
'guidance_scale': 7.5,
'output_type': 'numpy',
}
return inputs
def a_ ( self) -> Optional[Any]:
snake_case_ = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
pipe.set_progress_bar_config(disable=lowerCAmelCase__)
snake_case_ = self.get_dummy_inputs()
snake_case_ = pipe(**lowerCAmelCase__).images
snake_case_ = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 128, 128, 3)
snake_case_ = np.array([0.69643, 0.58484, 0.50314, 0.58760, 0.55368, 0.59643, 0.51529, 0.41217, 0.49087])
assert np.abs(image_slice - expected_slice).max() < 1e-1
def a_ ( self) -> List[str]:
snake_case_ = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
snake_case_ = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=lowerCAmelCase__)
pipe.set_progress_bar_config(disable=lowerCAmelCase__)
snake_case_ = self.get_dummy_inputs()
snake_case_ = pipe(**lowerCAmelCase__).images
snake_case_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
snake_case_ = np.array([0.61737, 0.54642, 0.53183, 0.54465, 0.52742, 0.60525, 0.49969, 0.40655, 0.48154])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
def a_ ( self) -> str:
snake_case_ = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
snake_case_ = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
pipe.set_progress_bar_config(disable=lowerCAmelCase__)
# warmup pass to apply optimizations
snake_case_ = pipe(**self.get_dummy_inputs())
snake_case_ = self.get_dummy_inputs()
snake_case_ = pipe(**lowerCAmelCase__).images
snake_case_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
snake_case_ = np.array([0.52761, 0.59977, 0.49033, 0.49619, 0.54282, 0.50311, 0.47600, 0.40918, 0.45203])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
def a_ ( self) -> int:
snake_case_ = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
snake_case_ = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
pipe.set_progress_bar_config(disable=lowerCAmelCase__)
snake_case_ = self.get_dummy_inputs()
snake_case_ = pipe(**lowerCAmelCase__).images
snake_case_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
snake_case_ = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
def a_ ( self) -> Dict:
snake_case_ = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
snake_case_ = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
pipe.set_progress_bar_config(disable=lowerCAmelCase__)
snake_case_ = self.get_dummy_inputs()
snake_case_ = pipe(**lowerCAmelCase__).images
snake_case_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
snake_case_ = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
def a_ ( self) -> Dict:
snake_case_ = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
snake_case_ = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe.set_progress_bar_config(disable=lowerCAmelCase__)
snake_case_ = self.get_dummy_inputs()
snake_case_ = pipe(**lowerCAmelCase__).images
snake_case_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
snake_case_ = np.array([0.65331, 0.58277, 0.48204, 0.56059, 0.53665, 0.56235, 0.50969, 0.40009, 0.46552])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class UpperCamelCase ( unittest.TestCase ):
@property
def a_ ( self) -> int:
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def a_ ( self) -> str:
snake_case_ = ort.SessionOptions()
snake_case_ = False
return options
def a_ ( self) -> Any:
snake_case_ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/img2img/sketch-mountains-input.jpg')
snake_case_ = init_image.resize((768, 512))
# using the PNDM scheduler by default
snake_case_ = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4', revision='onnx', safety_checker=lowerCAmelCase__, feature_extractor=lowerCAmelCase__, provider=self.gpu_provider, sess_options=self.gpu_options, )
pipe.set_progress_bar_config(disable=lowerCAmelCase__)
snake_case_ = 'A fantasy landscape, trending on artstation'
snake_case_ = np.random.RandomState(0)
snake_case_ = pipe(
prompt=lowerCAmelCase__, image=lowerCAmelCase__, strength=0.75, guidance_scale=7.5, num_inference_steps=10, generator=lowerCAmelCase__, output_type='np', )
snake_case_ = output.images
snake_case_ = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 768, 3)
snake_case_ = np.array([0.4909, 0.5059, 0.5372, 0.4623, 0.4876, 0.5049, 0.4820, 0.4956, 0.5019])
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
def a_ ( self) -> List[Any]:
snake_case_ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/img2img/sketch-mountains-input.jpg')
snake_case_ = init_image.resize((768, 512))
snake_case_ = LMSDiscreteScheduler.from_pretrained(
'runwayml/stable-diffusion-v1-5', subfolder='scheduler', revision='onnx')
snake_case_ = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5', revision='onnx', scheduler=lowerCAmelCase__, safety_checker=lowerCAmelCase__, feature_extractor=lowerCAmelCase__, provider=self.gpu_provider, sess_options=self.gpu_options, )
pipe.set_progress_bar_config(disable=lowerCAmelCase__)
snake_case_ = 'A fantasy landscape, trending on artstation'
snake_case_ = np.random.RandomState(0)
snake_case_ = pipe(
prompt=lowerCAmelCase__, image=lowerCAmelCase__, strength=0.75, guidance_scale=7.5, num_inference_steps=20, generator=lowerCAmelCase__, output_type='np', )
snake_case_ = output.images
snake_case_ = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 768, 3)
snake_case_ = np.array([0.8043, 0.926, 0.9581, 0.8119, 0.8954, 0.913, 0.7209, 0.7463, 0.7431])
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
| 358
|
"""simple docstring"""
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
@pytest.mark.parametrize(
'files' , [
['full:README.md', 'dataset_infos.json'],
['empty:README.md', 'dataset_infos.json'],
['dataset_infos.json'],
['full:README.md'],
] , )
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> List[str]:
snake_case_ = tmp_path_factory.mktemp('dset_infos_dir' )
if "full:README.md" in files:
with open(dataset_infos_dir / 'README.md' , 'w' ) as f:
f.write('---\ndataset_info:\n dataset_size: 42\n---' )
if "empty:README.md" in files:
with open(dataset_infos_dir / 'README.md' , 'w' ) as f:
f.write('' )
# we want to support dataset_infos.json for backward compatibility
if "dataset_infos.json" in files:
with open(dataset_infos_dir / 'dataset_infos.json' , 'w' ) as f:
f.write('{"default": {"dataset_size": 42}}' )
snake_case_ = DatasetInfosDict.from_directory(UpperCAmelCase )
assert dataset_infos
assert dataset_infos["default"].dataset_size == 42
@pytest.mark.parametrize(
'dataset_info' , [
DatasetInfo(),
DatasetInfo(
description='foo' , features=Features({'a': Value('int32' )} ) , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train'}] , download_size=42 , ),
] , )
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> Optional[Any]:
snake_case_ = str(UpperCAmelCase )
dataset_info.write_to_directory(UpperCAmelCase )
snake_case_ = DatasetInfo.from_directory(UpperCAmelCase )
assert dataset_info == reloaded
assert os.path.exists(os.path.join(UpperCAmelCase , 'dataset_info.json' ) )
def UpperCAmelCase ( ) -> Union[str, Any]:
snake_case_ = DatasetInfo(
description='foo' , citation='bar' , homepage='https://foo.bar' , license='CC0' , features=Features({'a': Value('int32' )} ) , post_processed={} , supervised_keys=() , task_templates=[] , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train', 'num_examples': 42}] , download_checksums={} , download_size=1337 , post_processing_size=442 , dataset_size=1234 , size_in_bytes=1337 + 442 + 1234 , )
snake_case_ = dataset_info._to_yaml_dict()
assert sorted(UpperCAmelCase ) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML )
for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
assert key in dataset_info_yaml_dict
assert isinstance(dataset_info_yaml_dict[key] , (list, dict, int, str) )
snake_case_ = yaml.safe_dump(UpperCAmelCase )
snake_case_ = yaml.safe_load(UpperCAmelCase )
assert dataset_info_yaml_dict == reloaded
def UpperCAmelCase ( ) -> Optional[Any]:
snake_case_ = DatasetInfo()
snake_case_ = dataset_info._to_yaml_dict()
assert dataset_info_yaml_dict == {}
@pytest.mark.parametrize(
'dataset_infos_dict' , [
DatasetInfosDict(),
DatasetInfosDict({'default': DatasetInfo()} ),
DatasetInfosDict({'my_config_name': DatasetInfo()} ),
DatasetInfosDict(
{
'default': DatasetInfo(
description='foo' , features=Features({'a': Value('int32' )} ) , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train'}] , download_size=42 , )
} ),
DatasetInfosDict(
{
'v1': DatasetInfo(dataset_size=42 ),
'v2': DatasetInfo(dataset_size=1337 ),
} ),
] , )
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> List[str]:
snake_case_ = str(UpperCAmelCase )
dataset_infos_dict.write_to_directory(UpperCAmelCase )
snake_case_ = DatasetInfosDict.from_directory(UpperCAmelCase )
# the config_name of the dataset_infos_dict take over the attribute
for config_name, dataset_info in dataset_infos_dict.items():
snake_case_ = config_name
# the yaml representation doesn't include fields like description or citation
# so we just test that we can recover what we can from the yaml
snake_case_ = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict() )
assert dataset_infos_dict == reloaded
if dataset_infos_dict:
assert os.path.exists(os.path.join(UpperCAmelCase , 'README.md' ) )
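# Minimal standalone sketch (added) of the YAML round-trip property the tests
# above rely on: safe_dump then safe_load returns the original plain mapping.
import yaml

_info = {'dataset_size': 42, 'splits': [{'name': 'train'}]}
assert yaml.safe_load(yaml.safe_dump(_info)) == _info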
| 312
| 0
|
"""simple docstring"""
import string
def decrypt(message: str) -> None:
    for key in range(len(string.ascii_uppercase)):
        translated = ''
        for symbol in message:
            if symbol in string.ascii_uppercase:
                num = string.ascii_uppercase.find(symbol)
                num = num - key
                if num < 0:
                    num = num + len(string.ascii_uppercase)
                translated = translated + string.ascii_uppercase[num]
            else:
                translated = translated + symbol
        print(f'Decryption using Key #{key}: {translated}')
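# Example (added): 'HELLO' Caesar-shifted forward by 3 is 'KHOOR', so
# decrypt('KHOOR') prints the plaintext on the Key #3 line.
_enc = ''.join(
    string.ascii_uppercase[(string.ascii_uppercase.find(c) + 3) % 26] for c in 'HELLO'
)
assert _enc == 'KHOOR'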
def main() -> None:
    message = input('Encrypted message: ')
    message = message.upper()
    decrypt(message)
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 359
|
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ):
SCREAMING_SNAKE_CASE_ = StableDiffusionInpaintPipeline
SCREAMING_SNAKE_CASE_ = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
SCREAMING_SNAKE_CASE_ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
SCREAMING_SNAKE_CASE_ = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
SCREAMING_SNAKE_CASE_ = frozenset([] )
def a_ ( self) -> Any:
torch.manual_seed(0)
snake_case_ = UNetaDConditionModel(
block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=9, out_channels=4, down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D'), up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D'), cross_attention_dim=32, attention_head_dim=(2, 4), use_linear_projection=lowerCAmelCase__, )
snake_case_ = PNDMScheduler(skip_prk_steps=lowerCAmelCase__)
torch.manual_seed(0)
snake_case_ = AutoencoderKL(
block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'], up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'], latent_channels=4, sample_size=128, )
torch.manual_seed(0)
snake_case_ = CLIPTextConfig(
bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act='gelu', projection_dim=512, )
snake_case_ = CLIPTextModel(lowerCAmelCase__)
snake_case_ = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
snake_case_ = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__=0) -> List[str]:
# TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched
snake_case_ = floats_tensor((1, 3, 32, 32), rng=random.Random(lowerCAmelCase__)).to(lowerCAmelCase__)
snake_case_ = image.cpu().permute(0, 2, 3, 1)[0]
snake_case_ = Image.fromarray(np.uinta(lowerCAmelCase__)).convert('RGB').resize((64, 64))
snake_case_ = Image.fromarray(np.uinta(image + 4)).convert('RGB').resize((64, 64))
if str(lowerCAmelCase__).startswith('mps'):
snake_case_ = torch.manual_seed(lowerCAmelCase__)
else:
snake_case_ = torch.Generator(device=lowerCAmelCase__).manual_seed(lowerCAmelCase__)
snake_case_ = {
'prompt': 'A painting of a squirrel eating a burger',
'image': init_image,
'mask_image': mask_image,
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def a_ ( self) -> Dict:
snake_case_ = 'cpu' # ensure determinism for the device-dependent torch.Generator
snake_case_ = self.get_dummy_components()
snake_case_ = StableDiffusionInpaintPipeline(**lowerCAmelCase__)
snake_case_ = sd_pipe.to(lowerCAmelCase__)
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase__)
snake_case_ = self.get_dummy_inputs(lowerCAmelCase__)
snake_case_ = sd_pipe(**lowerCAmelCase__).images
snake_case_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
snake_case_ = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
def a_ ( self) -> Union[str, Any]:
super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class UpperCamelCase ( unittest.TestCase ):
def a_ ( self) -> Optional[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a_ ( self) -> Union[str, Any]:
snake_case_ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-inpaint/init_image.png')
snake_case_ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png')
snake_case_ = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'
'/yellow_cat_sitting_on_a_park_bench.npy')
snake_case_ = 'stabilityai/stable-diffusion-2-inpainting'
snake_case_ = StableDiffusionInpaintPipeline.from_pretrained(lowerCAmelCase__, safety_checker=lowerCAmelCase__)
pipe.to(lowerCAmelCase__)
pipe.set_progress_bar_config(disable=lowerCAmelCase__)
pipe.enable_attention_slicing()
snake_case_ = 'Face of a yellow cat, high resolution, sitting on a park bench'
snake_case_ = torch.manual_seed(0)
snake_case_ = pipe(
prompt=lowerCAmelCase__, image=lowerCAmelCase__, mask_image=lowerCAmelCase__, generator=lowerCAmelCase__, output_type='np', )
snake_case_ = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image).max() < 9e-3
def a_ ( self) -> Optional[int]:
snake_case_ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-inpaint/init_image.png')
snake_case_ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png')
snake_case_ = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'
'/yellow_cat_sitting_on_a_park_bench_fp16.npy')
snake_case_ = 'stabilityai/stable-diffusion-2-inpainting'
snake_case_ = StableDiffusionInpaintPipeline.from_pretrained(
lowerCAmelCase__, torch_dtype=torch.floataa, safety_checker=lowerCAmelCase__, )
pipe.to(lowerCAmelCase__)
pipe.set_progress_bar_config(disable=lowerCAmelCase__)
pipe.enable_attention_slicing()
snake_case_ = 'Face of a yellow cat, high resolution, sitting on a park bench'
snake_case_ = torch.manual_seed(0)
snake_case_ = pipe(
prompt=lowerCAmelCase__, image=lowerCAmelCase__, mask_image=lowerCAmelCase__, generator=lowerCAmelCase__, output_type='np', )
snake_case_ = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image).max() < 5e-1
def a_ ( self) -> Union[str, Any]:
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
snake_case_ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-inpaint/init_image.png')
snake_case_ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png')
snake_case_ = 'stabilityai/stable-diffusion-2-inpainting'
snake_case_ = PNDMScheduler.from_pretrained(lowerCAmelCase__, subfolder='scheduler')
snake_case_ = StableDiffusionInpaintPipeline.from_pretrained(
lowerCAmelCase__, safety_checker=lowerCAmelCase__, scheduler=lowerCAmelCase__, torch_dtype=torch.floataa, )
pipe.to(lowerCAmelCase__)
pipe.set_progress_bar_config(disable=lowerCAmelCase__)
pipe.enable_attention_slicing(1)
pipe.enable_sequential_cpu_offload()
snake_case_ = 'Face of a yellow cat, high resolution, sitting on a park bench'
snake_case_ = torch.manual_seed(0)
snake_case_ = pipe(
prompt=lowerCAmelCase__, image=lowerCAmelCase__, mask_image=lowerCAmelCase__, generator=lowerCAmelCase__, num_inference_steps=2, output_type='np', )
snake_case_ = torch.cuda.max_memory_allocated()
# make sure that less than 2.65 GB is allocated
assert mem_bytes < 2.65 * 10**9
| 312
| 0
|
"""simple docstring"""
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config
# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
__UpperCamelCase = [
'''cross_validation.py''',
'''gradient_accumulation.py''',
'''local_sgd.py''',
'''multi_process_metrics.py''',
'''memory.py''',
'''automatic_gradient_accumulation.py''',
'''fsdp_with_peak_mem_tracking.py''',
'''deepspeed_with_config_support.py''',
'''megatron_lm_gpt_pretraining.py''',
]
class UpperCamelCase ( unittest.TestCase ):
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__ = None, lowerCAmelCase__ = None) -> Union[str, Any]:
snake_case_ = None
snake_case_ = os.path.abspath(os.path.join('examples', 'by_feature'))
snake_case_ = os.path.abspath('examples')
for item in os.listdir(lowerCAmelCase__):
if item not in EXCLUDE_EXAMPLES:
snake_case_ = os.path.join(lowerCAmelCase__, lowerCAmelCase__)
if os.path.isfile(lowerCAmelCase__) and ".py" in item_path:
with self.subTest(
tested_script=lowerCAmelCase__, feature_script=lowerCAmelCase__, tested_section='main()' if parser_only else 'training_function()', ):
snake_case_ = compare_against_test(
os.path.join(lowerCAmelCase__, lowerCAmelCase__), lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = '\n'.join(lowerCAmelCase__)
if special_strings is not None:
for string in special_strings:
snake_case_ = diff.replace(lowerCAmelCase__, '')
self.assertEqual(lowerCAmelCase__, '')
def a_ ( self) -> Optional[Any]:
self.one_complete_example('complete_nlp_example.py', lowerCAmelCase__)
self.one_complete_example('complete_nlp_example.py', lowerCAmelCase__)
def a_ ( self) -> Optional[Any]:
snake_case_ = os.path.abspath(os.path.join('examples', 'cv_example.py'))
snake_case_ = [
' ' * 16 + '{\n\n',
' ' * 20 + '"accuracy": eval_metric["accuracy"],\n\n',
' ' * 20 + '"f1": eval_metric["f1"],\n\n',
' ' * 20 + '"train_loss": total_loss.item() / len(train_dataloader),\n\n',
' ' * 20 + '"epoch": epoch,\n\n',
' ' * 16 + '},\n\n',
' ' * 16 + 'step=epoch,\n',
' ' * 12,
' ' * 8 + 'for step, batch in enumerate(active_dataloader):\n',
]
self.one_complete_example('complete_cv_example.py', lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__)
self.one_complete_example('complete_cv_example.py', lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__)
@mock.patch.dict(os.environ , {"TESTING_MOCKED_DATALOADERS": "1"} )
class FeatureExamplesTests(TempDirTestCase):
    clean_on_teardown = False

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        cls._tmpdir = tempfile.mkdtemp()
        cls.configPath = os.path.join(cls._tmpdir, 'default_config.yml')
        write_basic_config(save_location=cls.configPath)
        cls._launch_args = ['accelerate', 'launch', '--config_file', cls.configPath]

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        shutil.rmtree(cls._tmpdir)

    def test_checkpointing_by_epoch(self):
        testargs = f'\n examples/by_feature/checkpointing.py\n --checkpointing_steps epoch\n --output_dir {self.tmpdir}\n '.split()
        run_command(self._launch_args + testargs)
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir, 'epoch_0')))

    def test_checkpointing_by_steps(self):
        testargs = f'\n examples/by_feature/checkpointing.py\n --checkpointing_steps 1\n --output_dir {self.tmpdir}\n '.split()
        run_command(self._launch_args + testargs)
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir, 'step_2')))

    def test_load_states_by_epoch(self):
        testargs = f'\n examples/by_feature/checkpointing.py\n --resume_from_checkpoint {os.path.join(self.tmpdir, "epoch_0")}\n '.split()
        output = run_command(self._launch_args + testargs, return_stdout=True)
        self.assertNotIn('epoch 0:', output)
        self.assertIn('epoch 1:', output)

    def test_load_states_by_steps(self):
        testargs = f'\n examples/by_feature/checkpointing.py\n --resume_from_checkpoint {os.path.join(self.tmpdir, "step_2")}\n '.split()
        output = run_command(self._launch_args + testargs, return_stdout=True)
        if torch.cuda.is_available():
            num_processes = torch.cuda.device_count()
        else:
            num_processes = 1
        if num_processes > 1:
            self.assertNotIn('epoch 0:', output)
            self.assertIn('epoch 1:', output)
        else:
            self.assertIn('epoch 0:', output)
            self.assertIn('epoch 1:', output)

    @slow
    def test_cross_validation(self):
        testargs = '\n examples/by_feature/cross_validation.py\n --num_folds 2\n '.split()
        with mock.patch.dict(os.environ, {'TESTING_MOCKED_DATALOADERS': '0'}):
            output = run_command(self._launch_args + testargs, return_stdout=True)
            results = re.findall('({.+})', output)
            results = [r for r in results if 'accuracy' in r][-1]
            results = ast.literal_eval(results)
            self.assertGreaterEqual(results['accuracy'], 0.75)

    def test_multi_process_metrics(self):
        testargs = ['examples/by_feature/multi_process_metrics.py']
        run_command(self._launch_args + testargs)

    @require_trackers
    @mock.patch.dict(os.environ, {'WANDB_MODE': 'offline'})
    def test_tracking(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            testargs = f'\n examples/by_feature/tracking.py\n --with_tracking\n --project_dir {tmpdir}\n '.split()
            run_command(self._launch_args + testargs)
            self.assertTrue(os.path.exists(os.path.join(tmpdir, 'tracking')))

    def test_gradient_accumulation(self):
        testargs = ['examples/by_feature/gradient_accumulation.py']
        run_command(self._launch_args + testargs)

    def test_local_sgd(self):
        testargs = ['examples/by_feature/local_sgd.py']
        run_command(self._launch_args + testargs)
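# For reference, the launch pattern exercised by this suite reduces to a minimal
# standalone sketch (hypothetical helper, not part of the original tests): write a
# basic accelerate config, then run one example script through `accelerate launch`.
def _sketch_launch_one_example(script_path='examples/by_feature/checkpointing.py'):
    with tempfile.TemporaryDirectory() as tmpdir:
        config_path = os.path.join(tmpdir, 'default_config.yml')
        write_basic_config(save_location=config_path)
        launch_args = ['accelerate', 'launch', '--config_file', config_path]
        run_command(launch_args + [script_path, '--output_dir', tmpdir])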
| 360
|
"""simple docstring"""
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)
def simple_accuracy(preds, labels):
    return (preds == labels).mean()
@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/tokenizer we are going to fine-tune from."""

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"})
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"})
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"})
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}, )


@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval."""

    task_name: str = field(metadata={"help": "The name of the task to train on: " + ", ".join(processors.keys())})
    data_dir: str = field(metadata={"help": "Should contain the data files for the task."})
    max_seq_length: int = field(
        default=128, metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        }, )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"})
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f'Output directory ({training_args.output_dir}) already exists and is not empty. Use'
' --overwrite_output_dir to overcome.' )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
        'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fp16 , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
    logger.info('Training/evaluation parameters %s' , training_args )
# Set seed
set_seed(training_args.seed )
    try:
        processor = processors[data_args.task_name]()
        label_list = processor.get_labels()
        num_labels = len(label_list)
    except KeyError:
        raise ValueError('Task not found: %s' % (data_args.task_name))
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=num_labels , finetuning_task=data_args.task_name , cache_dir=model_args.cache_dir , )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , )
    # Get datasets
    train_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir , tokenizer=tokenizer , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir , tokenizer=tokenizer , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
        if training_args.do_eval
        else None
    )

    def compute_metrics(p: EvalPrediction ) -> Dict:
        preds = np.argmax(p.predictions , axis=1 )
        return {"acc": simple_accuracy(preds , p.label_ids )}

    # Data collator
    data_collator = DataCollatorWithPadding(tokenizer , pad_to_multiple_of=8 ) if training_args.fp16 else None
    # Initialize our Trainer
    trainer = Trainer(
        model=model , args=training_args , train_dataset=train_dataset , eval_dataset=eval_dataset , compute_metrics=compute_metrics , data_collator=data_collator , )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
    results = {}
if training_args.do_eval:
logger.info('*** Evaluate ***' )
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir , 'eval_results.txt' )
        if trainer.is_world_master():
            with open(output_eval_file , 'w' ) as writer:
                logger.info('***** Eval results *****' )
                for key, value in result.items():
                    logger.info(' %s = %s' , key , value )
                    writer.write('%s = %s\n' % (key, value) )
            results.update(result )
return results
def _mp_fn(index ):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
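# A minimal sketch of the HfArgumentParser pattern used in main() above (hypothetical
# dataclass, shown only for illustration): CLI flags are generated from dataclass
# fields, so `--demo_value 3` populates DemoArguments.demo_value.
@dataclass
class DemoArguments:
    demo_value: int = field(default=1, metadata={'help': 'An illustrative integer flag.'})


def _sketch_parse_demo_args():
    demo_parser = HfArgumentParser(DemoArguments)
    (demo_args,) = demo_parser.parse_args_into_dataclasses(args=['--demo_value', '3'])
    assert demo_args.demo_value == 3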
| 312
| 0
|
"""simple docstring"""
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class WhisperFeatureExtractor(SequenceFeatureExtractor):
SCREAMING_SNAKE_CASE_ = ["input_features"]
    def __init__(self, feature_size=80, sampling_rate=16000, hop_length=160, chunk_length=30, n_fft=400, padding_value=0.0, return_attention_mask=False, **kwargs, ):
        super().__init__(
            feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, return_attention_mask=return_attention_mask, **kwargs, )
        self.n_fft = n_fft
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        self.n_samples = chunk_length * sampling_rate
        self.nb_max_frames = self.n_samples // hop_length
        self.sampling_rate = sampling_rate
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2, num_mel_filters=feature_size, min_frequency=0.0, max_frequency=8000.0, sampling_rate=sampling_rate, norm='slaney', mel_scale='slaney', )
    def _np_extract_fbank_features(self, waveform) -> np.ndarray:
        log_spec = spectrogram(
            waveform, window_function(self.n_fft, 'hann'), frame_length=self.n_fft, hop_length=self.hop_length, power=2.0, mel_filters=self.mel_filters, log_mel='log10', )
        log_spec = log_spec[:, :-1]
        log_spec = np.maximum(log_spec, log_spec.max() - 8.0)
        log_spec = (log_spec + 4.0) / 4.0
        return log_spec
@staticmethod
# Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
    def zero_mean_unit_var_norm(input_values: List[np.ndarray], attention_mask: List[np.ndarray], padding_value: float = 0.0) -> List[np.ndarray]:
        if attention_mask is not None:
            attention_mask = np.array(attention_mask, np.int32)
            normed_input_values = []
            for vector, length in zip(input_values, attention_mask.sum(-1)):
                normed_slice = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7)
                if length < normed_slice.shape[0]:
                    normed_slice[length:] = padding_value
                normed_input_values.append(normed_slice)
        else:
            normed_input_values = [(x - x.mean()) / np.sqrt(x.var() + 1e-7) for x in input_values]
        return normed_input_values
    def __call__(self, raw_speech, truncation=True, pad_to_multiple_of=None, return_tensors=None, return_attention_mask=None, padding="max_length", max_length=None, sampling_rate=None, do_normalize=None, **kwargs, ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f'The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a'
                    f' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input'
                    f' was sampled with {self.sampling_rate} and not {sampling_rate}.')
        else:
            logger.warning(
                'It is strongly recommended to pass the `sampling_rate` argument to this function. '
                'Failing to do so can result in silent errors that might be hard to debug.')
        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f'Only mono-channel audio is supported for input to {self}')
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech]).T]
        batched_speech = BatchFeature({'input_features': raw_speech})
        # convert into correct format for padding
        padded_inputs = self.pad(
            batched_speech, padding=padding, max_length=max_length if max_length else self.n_samples, truncation=truncation, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask or do_normalize, )
        # zero-mean and unit-variance normalization
        if do_normalize:
            padded_inputs['input_features'] = self.zero_mean_unit_var_norm(
                padded_inputs['input_features'], attention_mask=padded_inputs['attention_mask'], padding_value=self.padding_value, )
            padded_inputs['input_features'] = np.stack(padded_inputs['input_features'], axis=0)
        # make sure list is in array format
        input_features = padded_inputs.get('input_features').transpose(2, 0, 1)
        input_features = [self._np_extract_fbank_features(waveform) for waveform in input_features[0]]
        if isinstance(input_features[0], List):
            padded_inputs['input_features'] = [np.asarray(feature, dtype=np.float32) for feature in input_features]
        else:
            padded_inputs['input_features'] = input_features
        if return_attention_mask:
            # rescale from sample (48000) to feature (3000)
            padded_inputs['attention_mask'] = padded_inputs['attention_mask'][:, :: self.hop_length]
        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)
        return padded_inputs
    def to_dict(self) -> Dict[str, Any]:
        output = copy.deepcopy(self.__dict__)
        output['feature_extractor_type'] = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        return output
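# For intuition, the per-utterance normalization implemented by zero_mean_unit_var_norm
# above reduces to the one-liner below on a single waveform (a standalone sketch on
# synthetic data, not part of the extractor):
def _sketch_zero_mean_unit_var():
    waveform = np.random.randn(16000).astype(np.float32)
    normed = (waveform - waveform.mean()) / np.sqrt(waveform.var() + 1e-7)
    assert abs(float(normed.mean())) < 1e-3  # mean is ~0 up to float32 rounding
    return normed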
| 361
|
"""simple docstring"""
from __future__ import annotations
import math
def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float) -> int:
    if depth < 0:
        raise ValueError('Depth cannot be less than 0')
    if len(scores) == 0:
        raise ValueError('Scores cannot be empty')
    if depth == height:
        return scores[node_index]
    if is_max:
        return max(
            minimax(depth + 1, node_index * 2, False, scores, height), minimax(depth + 1, node_index * 2 + 1, False, scores, height), )
    return min(
        minimax(depth + 1, node_index * 2, True, scores, height), minimax(depth + 1, node_index * 2 + 1, True, scores, height), )


def main() -> None:
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores), 2)
    print('Optimal value : ', end='')
    print(minimax(0, 0, True, scores, height))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
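# Worked example (hypothetical scores): with leaves [3, 5, 2, 9] and height 2, the
# maximizer picks max(min(3, 5), min(2, 9)) = max(3, 2) = 3.
def _sketch_minimax_example():
    demo_scores = [3, 5, 2, 9]
    demo_height = math.log(len(demo_scores), 2)
    assert minimax(0, 0, True, demo_scores, demo_height) == 3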
| 312
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    '''configuration_lxmert''': ['''LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LxmertConfig'''],
    '''tokenization_lxmert''': ['''LxmertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_lxmert_fast'''] = ['''LxmertTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_lxmert'''] = [
        '''LxmertEncoder''',
        '''LxmertForPreTraining''',
        '''LxmertForQuestionAnswering''',
        '''LxmertModel''',
        '''LxmertPreTrainedModel''',
        '''LxmertVisualFeatureEncoder''',
        '''LxmertXLayer''',
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_lxmert'''] = [
        '''TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''TFLxmertForPreTraining''',
        '''TFLxmertMainLayer''',
        '''TFLxmertModel''',
        '''TFLxmertPreTrainedModel''',
        '''TFLxmertVisualFeatureEncoder''',
    ]
if TYPE_CHECKING:
from .configuration_lxmert import LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, LxmertConfig
from .tokenization_lxmert import LxmertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_lxmert_fast import LxmertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lxmert import (
LxmertEncoder,
LxmertForPreTraining,
LxmertForQuestionAnswering,
LxmertModel,
LxmertPreTrainedModel,
LxmertVisualFeatureEncoder,
LxmertXLayer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_lxmert import (
TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLxmertForPreTraining,
TFLxmertMainLayer,
TFLxmertModel,
TFLxmertPreTrainedModel,
TFLxmertVisualFeatureEncoder,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
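# The lazy-import pattern above, reduced to a minimal sketch (hypothetical structure;
# side-effect free): nothing listed in the import structure is actually imported
# until an attribute is first accessed, which keeps package import time cheap.
def _sketch_lazy_module():
    demo_structure = {'configuration_lxmert': ['LxmertConfig']}
    # Building the object does not import the submodules; attribute access does.
    return _LazyModule(__name__, globals()['__file__'], demo_structure, module_spec=__spec__)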
| 362
|
"""simple docstring"""
import argparse
import json
import torch
from diffusers import DDPMScheduler, LDMPipeline, UNet2DModel, VQModel
def shave_segments(path, n_shave_prefix_segments=1):
    if n_shave_prefix_segments >= 0:
        return '.'.join(path.split('.')[n_shave_prefix_segments:])
    else:
        return '.'.join(path.split('.')[:n_shave_prefix_segments])


def renew_resnet_paths(old_list, n_shave_prefix_segments=0):
    mapping = []
    for old_item in old_list:
        new_item = old_item.replace('in_layers.0', 'norm1')
        new_item = new_item.replace('in_layers.2', 'conv1')
        new_item = new_item.replace('out_layers.0', 'norm2')
        new_item = new_item.replace('out_layers.3', 'conv2')
        new_item = new_item.replace('emb_layers.1', 'time_emb_proj')
        new_item = new_item.replace('skip_connection', 'conv_shortcut')
        new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments)
        mapping.append({'old': old_item, 'new': new_item})
    return mapping


def renew_attention_paths(old_list, n_shave_prefix_segments=0):
    mapping = []
    for old_item in old_list:
        new_item = old_item
        new_item = new_item.replace('norm.weight', 'group_norm.weight')
        new_item = new_item.replace('norm.bias', 'group_norm.bias')
        new_item = new_item.replace('proj_out.weight', 'proj_attn.weight')
        new_item = new_item.replace('proj_out.bias', 'proj_attn.bias')
        new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments)
        mapping.append({'old': old_item, 'new': new_item})
    return mapping
def assign_to_checkpoint(paths, checkpoint, old_checkpoint, attention_paths_to_split=None, additional_replacements=None, config=None):
    assert isinstance(paths, list), "Paths should be a list of dicts containing 'old' and 'new' keys."
    # Splits the attention layers into three variables.
    if attention_paths_to_split is not None:
        for path, path_map in attention_paths_to_split.items():
            old_tensor = old_checkpoint[path]
            channels = old_tensor.shape[0] // 3
            target_shape = (-1, channels) if len(old_tensor.shape) == 3 else (-1)
            num_heads = old_tensor.shape[0] // config['num_head_channels'] // 3
            old_tensor = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:])
            query, key, value = old_tensor.split(channels // num_heads, dim=1)
            checkpoint[path_map['query']] = query.reshape(target_shape)
            checkpoint[path_map['key']] = key.reshape(target_shape)
            checkpoint[path_map['value']] = value.reshape(target_shape)
    for path in paths:
        new_path = path['new']
        # These have already been assigned
        if attention_paths_to_split is not None and new_path in attention_paths_to_split:
            continue
        # Global renaming happens here
        new_path = new_path.replace('middle_block.0', 'mid_block.resnets.0')
        new_path = new_path.replace('middle_block.1', 'mid_block.attentions.0')
        new_path = new_path.replace('middle_block.2', 'mid_block.resnets.1')
        if additional_replacements is not None:
            for replacement in additional_replacements:
                new_path = new_path.replace(replacement['old'], replacement['new'])
        # proj_attn.weight has to be converted from conv 1D to linear
        if "proj_attn.weight" in new_path:
            checkpoint[new_path] = old_checkpoint[path['old']][:, :, 0]
        else:
            checkpoint[new_path] = old_checkpoint[path['old']]
def convert_ldm_checkpoint(checkpoint, config):
    new_checkpoint = {}
    new_checkpoint['time_embedding.linear_1.weight'] = checkpoint['time_embed.0.weight']
    new_checkpoint['time_embedding.linear_1.bias'] = checkpoint['time_embed.0.bias']
    new_checkpoint['time_embedding.linear_2.weight'] = checkpoint['time_embed.2.weight']
    new_checkpoint['time_embedding.linear_2.bias'] = checkpoint['time_embed.2.bias']
    new_checkpoint['conv_in.weight'] = checkpoint['input_blocks.0.0.weight']
    new_checkpoint['conv_in.bias'] = checkpoint['input_blocks.0.0.bias']
    new_checkpoint['conv_norm_out.weight'] = checkpoint['out.0.weight']
    new_checkpoint['conv_norm_out.bias'] = checkpoint['out.0.bias']
    new_checkpoint['conv_out.weight'] = checkpoint['out.2.weight']
    new_checkpoint['conv_out.bias'] = checkpoint['out.2.bias']
    # Retrieves the keys for the input blocks only
    num_input_blocks = len({'.'.join(layer.split('.')[:2]) for layer in checkpoint if 'input_blocks' in layer})
    input_blocks = {
        layer_id: [key for key in checkpoint if f'input_blocks.{layer_id}' in key]
        for layer_id in range(num_input_blocks)
    }
    # Retrieves the keys for the middle blocks only
    num_middle_blocks = len({'.'.join(layer.split('.')[:2]) for layer in checkpoint if 'middle_block' in layer})
    middle_blocks = {
        layer_id: [key for key in checkpoint if f'middle_block.{layer_id}' in key]
        for layer_id in range(num_middle_blocks)
    }
    # Retrieves the keys for the output blocks only
    num_output_blocks = len({'.'.join(layer.split('.')[:2]) for layer in checkpoint if 'output_blocks' in layer})
    output_blocks = {
        layer_id: [key for key in checkpoint if f'output_blocks.{layer_id}' in key]
        for layer_id in range(num_output_blocks)
    }
    for i in range(1, num_input_blocks):
        block_id = (i - 1) // (config['num_res_blocks'] + 1)
        layer_in_block_id = (i - 1) % (config['num_res_blocks'] + 1)
        resnets = [key for key in input_blocks[i] if f'input_blocks.{i}.0' in key]
        attentions = [key for key in input_blocks[i] if f'input_blocks.{i}.1' in key]
        if f'input_blocks.{i}.0.op.weight' in checkpoint:
            new_checkpoint[f'down_blocks.{block_id}.downsamplers.0.conv.weight'] = checkpoint[
                f'input_blocks.{i}.0.op.weight'
            ]
            new_checkpoint[f'down_blocks.{block_id}.downsamplers.0.conv.bias'] = checkpoint[
                f'input_blocks.{i}.0.op.bias'
            ]
            continue
        paths = renew_resnet_paths(resnets)
        meta_path = {'old': f'input_blocks.{i}.0', 'new': f'down_blocks.{block_id}.resnets.{layer_in_block_id}'}
        resnet_op = {'old': 'resnets.2.op', 'new': 'downsamplers.0.op'}
        assign_to_checkpoint(
            paths, new_checkpoint, checkpoint, additional_replacements=[meta_path, resnet_op], config=config)
        if len(attentions):
            paths = renew_attention_paths(attentions)
            meta_path = {
                'old': f'input_blocks.{i}.1',
                'new': f'down_blocks.{block_id}.attentions.{layer_in_block_id}',
            }
            to_split = {
                f'input_blocks.{i}.1.qkv.bias': {
                    'key': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias',
                    'query': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias',
                    'value': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias',
                },
                f'input_blocks.{i}.1.qkv.weight': {
                    'key': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight',
                    'query': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight',
                    'value': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight',
                },
            }
            assign_to_checkpoint(
                paths, new_checkpoint, checkpoint, additional_replacements=[meta_path], attention_paths_to_split=to_split, config=config, )
    resnet_0 = middle_blocks[0]
    attentions = middle_blocks[1]
    resnet_1 = middle_blocks[2]
    resnet_0_paths = renew_resnet_paths(resnet_0)
    assign_to_checkpoint(resnet_0_paths, new_checkpoint, checkpoint, config=config)
    resnet_1_paths = renew_resnet_paths(resnet_1)
    assign_to_checkpoint(resnet_1_paths, new_checkpoint, checkpoint, config=config)
    attentions_paths = renew_attention_paths(attentions)
    to_split = {
        'middle_block.1.qkv.bias': {
            'key': 'mid_block.attentions.0.key.bias',
            'query': 'mid_block.attentions.0.query.bias',
            'value': 'mid_block.attentions.0.value.bias',
        },
        'middle_block.1.qkv.weight': {
            'key': 'mid_block.attentions.0.key.weight',
            'query': 'mid_block.attentions.0.query.weight',
            'value': 'mid_block.attentions.0.value.weight',
        },
    }
    assign_to_checkpoint(
        attentions_paths, new_checkpoint, checkpoint, attention_paths_to_split=to_split, config=config)
    for i in range(num_output_blocks):
        block_id = i // (config['num_res_blocks'] + 1)
        layer_in_block_id = i % (config['num_res_blocks'] + 1)
        output_block_layers = [shave_segments(name, 2) for name in output_blocks[i]]
        output_block_list = {}
        for layer in output_block_layers:
            layer_id, layer_name = layer.split('.')[0], shave_segments(layer, 1)
            if layer_id in output_block_list:
                output_block_list[layer_id].append(layer_name)
            else:
                output_block_list[layer_id] = [layer_name]
        if len(output_block_list) > 1:
            resnets = [key for key in output_blocks[i] if f'output_blocks.{i}.0' in key]
            attentions = [key for key in output_blocks[i] if f'output_blocks.{i}.1' in key]
            resnet_0_paths = renew_resnet_paths(resnets)
            paths = renew_resnet_paths(resnets)
            meta_path = {'old': f'output_blocks.{i}.0', 'new': f'up_blocks.{block_id}.resnets.{layer_in_block_id}'}
            assign_to_checkpoint(paths, new_checkpoint, checkpoint, additional_replacements=[meta_path], config=config)
            if ["conv.weight", "conv.bias"] in output_block_list.values():
                index = list(output_block_list.values()).index(['conv.weight', 'conv.bias'])
                new_checkpoint[f'up_blocks.{block_id}.upsamplers.0.conv.weight'] = checkpoint[
                    f'output_blocks.{i}.{index}.conv.weight'
                ]
                new_checkpoint[f'up_blocks.{block_id}.upsamplers.0.conv.bias'] = checkpoint[
                    f'output_blocks.{i}.{index}.conv.bias'
                ]
                # Clear attentions as they have been attributed above.
                if len(attentions) == 2:
                    attentions = []
            if len(attentions):
                paths = renew_attention_paths(attentions)
                meta_path = {
                    'old': f'output_blocks.{i}.1',
                    'new': f'up_blocks.{block_id}.attentions.{layer_in_block_id}',
                }
                to_split = {
                    f'output_blocks.{i}.1.qkv.bias': {
                        'key': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias',
                        'query': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias',
                        'value': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias',
                    },
                    f'output_blocks.{i}.1.qkv.weight': {
                        'key': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight',
                        'query': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight',
                        'value': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight',
                    },
                }
                assign_to_checkpoint(
                    paths, new_checkpoint, checkpoint, additional_replacements=[meta_path], attention_paths_to_split=to_split if any('qkv' in key for key in attentions) else None, config=config, )
        else:
            resnet_0_paths = renew_resnet_paths(output_blocks[i], n_shave_prefix_segments=1)
            for path in resnet_0_paths:
                old_path = '.'.join(['output_blocks', str(i), path['old']])
                new_path = '.'.join(['up_blocks', str(block_id), 'resnets', str(layer_in_block_id), path['new']])
                new_checkpoint[new_path] = checkpoint[old_path]
    return new_checkpoint
if __name__ == "__main__":
__UpperCamelCase = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the architecture.''',
)
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
__UpperCamelCase = parser.parse_args()
__UpperCamelCase = torch.load(args.checkpoint_path)
with open(args.config_file) as f:
__UpperCamelCase = json.loads(f.read())
__UpperCamelCase = convert_ldm_checkpoint(checkpoint, config)
if "ldm" in config:
del config["ldm"]
__UpperCamelCase = UNetaDModel(**config)
model.load_state_dict(converted_checkpoint)
try:
__UpperCamelCase = DDPMScheduler.from_config('''/'''.join(args.checkpoint_path.split('''/''')[:-1]))
__UpperCamelCase = VQModel.from_pretrained('''/'''.join(args.checkpoint_path.split('''/''')[:-1]))
__UpperCamelCase = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae)
pipe.save_pretrained(args.dump_path)
except: # noqa: E722
model.save_pretrained(args.dump_path)
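# For intuition, the `proj_attn.weight` handling in assign_to_checkpoint simply drops
# the trailing kernel dimension of a 1x1 Conv1d so the tensor can be loaded into an
# nn.Linear (a standalone sketch with random weights):
def _sketch_conv1d_to_linear():
    conv_weight = torch.randn(8, 8, 1)    # Conv1d weight: (out_channels, in_channels, kernel_size=1)
    linear_weight = conv_weight[:, :, 0]  # equivalent nn.Linear weight: (out_features, in_features)
    assert linear_weight.shape == (8, 8)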
| 312
| 0
|
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("TEST_SAGEMAKER" , "False" ) ) is not True , reason="Skipping test because should only be run when releasing minor transformers version" , )
@pytest.mark.usefixtures("sm_env" )
@parameterized_class(
[
{
"framework": "pytorch",
"script": "run_glue_model_parallelism.py",
"model_name_or_path": "roberta-large",
"instance_type": "ml.p3dn.24xlarge",
"results": {"train_runtime": 1_6_0_0, "eval_accuracy": 0.3, "eval_loss": 1.2},
},
{
"framework": "pytorch",
"script": "run_glue.py",
"model_name_or_path": "roberta-large",
"instance_type": "ml.p3dn.24xlarge",
"results": {"train_runtime": 1_6_0_0, "eval_accuracy": 0.3, "eval_loss": 1.2},
},
] )
class UpperCamelCase ( unittest.TestCase ):
    def setUp(self):
        if self.framework == "pytorch":
            subprocess.run(
                f'cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py'.split(), encoding='utf-8', check=True, )
        assert hasattr(self, 'env')

    def create_estimator(self, instance_count):
        # configuration for running training on smdistributed Model Parallel
        mpi_options = {
            'enabled': True,
            'processes_per_host': 8,
        }
        smp_options = {
            'enabled': True,
            'parameters': {
                'microbatches': 4,
                'placement_strategy': 'spread',
                'pipeline': 'interleaved',
                'optimize': 'speed',
                'partitions': 4,
                'ddp': True,
            },
        }
        distribution = {'smdistributed': {'modelparallel': smp_options}, 'mpi': mpi_options}
        name_extension = 'trainer' if self.script == 'run_glue.py' else 'smtrainer'
        # creates estimator
        return HuggingFace(
            entry_point=self.script, source_dir=self.env.test_path, role=self.env.role, image_uri=self.env.image_uri, base_job_name=f'{self.env.base_job_name}-{instance_count}-smp-{name_extension}', instance_count=instance_count, instance_type=self.instance_type, debugger_hook_config=False, hyperparameters={
                **self.env.hyperparameters,
                'model_name_or_path': self.model_name_or_path,
                'max_steps': 500,
            }, metric_definitions=self.env.metric_definitions, distribution=distribution, py_version='py36', )

    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name).export_csv(f'{self.env.test_path}/{job_name}_metrics.csv')

    @parameterized.expand([(1,)])
    def test_script(self, instance_count):
        # create estimator
        estimator = self.create_estimator(instance_count)
        # run training
        estimator.fit()
        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()
        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == 'eval_accuracy']['value'])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == 'eval_loss']['value'])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get('TrainingTimeInSeconds', 99_9999)
        )
        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results['eval_accuracy'] for t in eval_accuracy)
        assert all(t <= self.results['eval_loss'] for t in eval_loss)
        # dump tests result into json file to share in PR
        with open(f'{estimator.latest_training_job.name}.json', 'w') as outfile:
            json.dump({'train_time': train_runtime, 'eval_accuracy': eval_accuracy, 'eval_loss': eval_loss}, outfile)
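# A minimal sketch of the `parameterized_class` mechanism used above (hypothetical
# test class, shown for illustration): each dict in the list becomes class attributes
# on a generated subclass, so the same test body runs once per configuration.
@parameterized_class([{'framework': 'pytorch'}, {'framework': 'tensorflow'}])
class _SketchParameterizedTest(unittest.TestCase):
    def test_framework_attribute_is_set(self):
        assert self.framework in ('pytorch', 'tensorflow')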
| 363
|
"""simple docstring"""
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
def rename_key(name):
    # vision encoder
    if "img_encoder.pos_embed" in name:
        name = name.replace('img_encoder.pos_embed' , 'vision_model.embeddings.position_embeddings' )
    if "img_encoder.patch_embed.proj" in name:
        name = name.replace('img_encoder.patch_embed.proj' , 'vision_model.embeddings.patch_embeddings.projection' )
    if "img_encoder.patch_embed.norm" in name:
        name = name.replace('img_encoder.patch_embed.norm' , 'vision_model.embeddings.layernorm' )
    if "img_encoder.layers" in name:
        name = name.replace('img_encoder.layers' , 'vision_model.encoder.stages' )
    if "blocks" in name and "res" not in name:
        name = name.replace('blocks' , 'layers' )
    if "attn" in name and "pre_assign" not in name:
        name = name.replace('attn' , 'self_attn' )
    if "proj" in name and "self_attn" in name and "text" not in name:
        name = name.replace('proj' , 'out_proj' )
    if "pre_assign_attn.attn.proj" in name:
        name = name.replace('pre_assign_attn.attn.proj' , 'pre_assign_attn.attn.out_proj' )
    if "norm1" in name:
        name = name.replace('norm1' , 'layer_norm1' )
    if "norm2" in name and "pre_assign" not in name:
        name = name.replace('norm2' , 'layer_norm2' )
    if "img_encoder.norm" in name:
        name = name.replace('img_encoder.norm' , 'vision_model.layernorm' )
    # text encoder
    if "text_encoder.token_embedding" in name:
        name = name.replace('text_encoder.token_embedding' , 'text_model.embeddings.token_embedding' )
    if "text_encoder.positional_embedding" in name:
        name = name.replace('text_encoder.positional_embedding' , 'text_model.embeddings.position_embedding.weight' )
    if "text_encoder.transformer.resblocks." in name:
        name = name.replace('text_encoder.transformer.resblocks.' , 'text_model.encoder.layers.' )
    if "ln_1" in name:
        name = name.replace('ln_1' , 'layer_norm1' )
    if "ln_2" in name:
        name = name.replace('ln_2' , 'layer_norm2' )
    if "c_fc" in name:
        name = name.replace('c_fc' , 'fc1' )
    if "c_proj" in name:
        name = name.replace('c_proj' , 'fc2' )
    if "text_encoder" in name:
        name = name.replace('text_encoder' , 'text_model' )
    if "ln_final" in name:
        name = name.replace('ln_final' , 'final_layer_norm' )
    # projection layers
    if "img_projector.linear_hidden." in name:
        name = name.replace('img_projector.linear_hidden.' , 'visual_projection.' )
    if "img_projector.linear_out." in name:
        name = name.replace('img_projector.linear_out.' , 'visual_projection.3.' )
    if "text_projector.linear_hidden" in name:
        name = name.replace('text_projector.linear_hidden' , 'text_projection' )
    if "text_projector.linear_out" in name:
        name = name.replace('text_projector.linear_out' , 'text_projection.3' )
    return name
def convert_state_dict(orig_state_dict , config ):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key )
if "qkv" in key:
# weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
snake_case_ = key.split('.' )
snake_case_ , snake_case_ = int(key_split[2] ), int(key_split[4] )
snake_case_ = config.vision_config.hidden_size
if "weight" in key:
snake_case_ = val[:dim, :]
snake_case_ = val[dim : dim * 2, :]
snake_case_ = val[-dim:, :]
else:
snake_case_ = val[:dim]
snake_case_ = val[dim : dim * 2]
snake_case_ = val[-dim:]
elif "in_proj" in key:
# weights and biases of the key, value and query projections of text encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
snake_case_ = key.split('.' )
snake_case_ = int(key_split[3] )
snake_case_ = config.text_config.hidden_size
if "weight" in key:
snake_case_ = val[:dim, :]
snake_case_ = val[
dim : dim * 2, :
]
snake_case_ = val[-dim:, :]
else:
snake_case_ = val[:dim]
snake_case_ = val[dim : dim * 2]
snake_case_ = val[-dim:]
        else:
            new_name = rename_key(key )
            # squeeze if necessary
            if (
                "text_projection.0" in new_name
                or "text_projection.3" in new_name
                or "visual_projection.0" in new_name
                or "visual_projection.3" in new_name
            ):
                orig_state_dict[new_name] = val.squeeze_()
            else:
                orig_state_dict[new_name] = val
    return orig_state_dict
def prepare_img():
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_groupvit_checkpoint(checkpoint_path , pytorch_dump_folder_path , model_name="groupvit-gcc-yfcc" , push_to_hub=False ):
    config = GroupViTConfig()
    model = GroupViTModel(config ).eval()
    state_dict = torch.load(checkpoint_path , map_location='cpu' )['model']
    new_state_dict = convert_state_dict(state_dict , config )
    missing_keys , unexpected_keys = model.load_state_dict(new_state_dict , strict=False )
    assert missing_keys == ["text_model.embeddings.position_ids"]
    assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(unexpected_keys ) == 0)
    # verify result
    processor = CLIPProcessor.from_pretrained('openai/clip-vit-base-patch32' )
    image = prepare_img()
    inputs = processor(text=['a photo of a cat', 'a photo of a dog'] , images=image , padding=True , return_tensors='pt' )
    with torch.no_grad():
        outputs = model(**inputs )
    if model_name == "groupvit-gcc-yfcc":
        expected_logits = torch.tensor([[13.3523, 6.3629]] )
    elif model_name == "groupvit-gcc-redcaps":
        expected_logits = torch.tensor([[16.1873, 8.6230]] )
    else:
        raise ValueError(f'Model name {model_name} not supported.' )
    assert torch.allclose(outputs.logits_per_image , expected_logits , atol=1e-3 )
    processor.save_pretrained(pytorch_dump_folder_path )
    model.save_pretrained(pytorch_dump_folder_path )
    print('Successfully saved processor and model to' , pytorch_dump_folder_path )
    if push_to_hub:
        print('Pushing to the hub...' )
        processor.push_to_hub(model_name , organization='nielsr' )
        model.push_to_hub(model_name , organization='nielsr' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to dump the processor and PyTorch model.'''
)
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to GroupViT checkpoint''')
parser.add_argument(
'''--model_name''',
        default='''groupvit-gcc-yfcc''',
type=str,
help='''Name of the model. Expecting either \'groupvit-gcc-yfcc\' or \'groupvit-gcc-redcaps\'''',
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.''',
)
    args = parser.parse_args()
convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
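# For intuition, the qkv splitting performed in convert_state_dict reduces to slicing
# a fused (3*dim, dim) projection into equal thirds (a standalone sketch with random
# weights, not part of the conversion itself):
def _sketch_split_qkv(dim=4):
    qkv_weight = torch.randn(3 * dim, dim)
    q_w, k_w, v_w = qkv_weight[:dim, :], qkv_weight[dim : dim * 2, :], qkv_weight[-dim:, :]
    assert q_w.shape == k_w.shape == v_w.shape == (dim, dim)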
| 312
| 0
|
"""simple docstring"""
import torch
from transformers import AutoModel
class FSNERModel(torch.nn.Module):
    def __init__(self, pretrained_model_name_or_path="sayef/fsner-bert-base-uncased"):
        super(FSNERModel, self).__init__()
        self.bert = AutoModel.from_pretrained(pretrained_model_name_or_path, return_dict=True)
        self.cos = torch.nn.CosineSimilarity(3, 1e-08)
        self.softmax = torch.nn.Softmax(dim=1)

    def BERT(self, **inputs):
        return self.bert(**inputs).last_hidden_state

    def VectorSum(self, token_embeddings):
        return token_embeddings.sum(2, keepdim=True)

    def Atten(self, q_rep, S_rep, T=1):
        return self.softmax(T * self.cos(q_rep, S_rep))

    def forward(self, W_query, W_supports):
        support_sizes = W_supports['sizes'].tolist()
        start_token_id = W_supports['start_token_id'].item()
        end_token_id = W_supports['end_token_id'].item()
        del W_supports["sizes"]
        del W_supports["start_token_id"]
        del W_supports["end_token_id"]
        q = self.BERT(**W_query)
        S = self.BERT(**W_supports)
        p_starts = None
        p_ends = None
        start_token_masks = W_supports['input_ids'] == start_token_id
        end_token_masks = W_supports['input_ids'] == end_token_id
        for i, size in enumerate(support_sizes):
            if i == 0:
                s = 0
            else:
                s = support_sizes[i - 1]
            s_start = S[s : s + size][start_token_masks[s : s + size]]
            s_end = S[s : s + size][end_token_masks[s : s + size]]
            p_start = torch.matmul(q[i], s_start.T).sum(1).softmax(0)
            p_end = torch.matmul(q[i], s_end.T).sum(1).softmax(0)
            if p_starts is not None:
                p_starts = torch.vstack((p_starts, p_start))
                p_ends = torch.vstack((p_ends, p_end))
            else:
                p_starts = p_start
                p_ends = p_end
        return p_starts, p_ends
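# A minimal sketch of the temperature-scaled cosine attention defined in Atten above
# (synthetic tensors; T is the softmax temperature, shapes chosen for illustration):
def _sketch_atten(T=1):
    cos = torch.nn.CosineSimilarity(3, 1e-08)
    softmax = torch.nn.Softmax(dim=1)
    q_rep = torch.randn(2, 3, 5, 8)
    s_rep = torch.randn(2, 3, 5, 8)
    return softmax(T * cos(q_rep, s_rep))  # cosine over the last dim -> shape (2, 3, 5)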
| 364
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {'''configuration_mmbt''': ['''MMBTConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_mmbt'''] = ['''MMBTForClassification''', '''MMBTModel''', '''ModalEmbeddings''']
if TYPE_CHECKING:
from .configuration_mmbt import MMBTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 312
| 0
|
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class TFGPT2Tokenizer(metaclass=DummyObject):
    _backends = ["keras_nlp"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['keras_nlp'])
| 365
|
"""simple docstring"""
from __future__ import annotations
def allocation_num(number_of_bytes: int, partitions: int) -> list[str]:
    if partitions <= 0:
        raise ValueError('partitions must be a positive number!')
    if partitions > number_of_bytes:
        raise ValueError('partitions can not > number_of_bytes!')
    bytes_per_partition = number_of_bytes // partitions
    allocation_list = []
    for i in range(partitions):
        start_bytes = i * bytes_per_partition + 1
        end_bytes = (
            number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition
        )
        allocation_list.append(f'{start_bytes}-{end_bytes}')
    return allocation_list
if __name__ == "__main__":
import doctest
doctest.testmod()
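# Quick usage check of the allocator restored above (a minimal example): 100 bytes
# over 4 partitions yields four contiguous, inclusive byte ranges.
assert allocation_num(100, 4) == ['1-25', '26-50', '51-75', '76-100']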
| 312
| 0
|
"""simple docstring"""
import argparse
import os
import subprocess
from packaging.version import Version, parse
from accelerate.commands.config.config_args import default_config_file, load_config_from_file
_description = '''Run commands across TPU VMs for initial setup before running `accelerate launch`.'''
def tpu_command_parser(subparsers=None ):
    if subparsers is not None:
        parser = subparsers.add_parser('tpu-config' , description=_description )
    else:
        parser = argparse.ArgumentParser('Accelerate tpu-config command' , description=_description )
    # Core arguments
    config_args = parser.add_argument_group(
        'Config Arguments' , 'Arguments that can be configured through `accelerate config`.' )
    config_args.add_argument(
        '--config_file' , type=str , default=None , help='Path to the config file to use for accelerate.' , )
    config_args.add_argument(
        '--tpu_name' , default=None , help='The name of the TPU to use. If not specified, will use the TPU specified in the config file.' , )
    config_args.add_argument(
        '--tpu_zone' , default=None , help='The zone of the TPU to use. If not specified, will use the zone specified in the config file.' , )
    pod_args = parser.add_argument_group('TPU Arguments' , 'Arguments for options ran inside the TPU.' )
    pod_args.add_argument(
        '--use_alpha' , action='store_true' , help='Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.' , )
    pod_args.add_argument(
        '--command_file' , default=None , help='The path to the file containing the commands to run on the pod on startup.' , )
pod_args.add_argument(
'--command' , action='append' , nargs='+' , help='A command to run on the pod. Can be passed multiple times.' , )
pod_args.add_argument(
'--install_accelerate' , action='store_true' , help='Whether to install accelerate on the pod. Defaults to False.' , )
pod_args.add_argument(
'--accelerate_version' , default='latest' , help='The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify \'dev\' to install from GitHub.' , )
pod_args.add_argument(
'--debug' , action='store_true' , help='If set, will print the command that would be run instead of running it.' )
if subparsers is not None:
        parser.set_defaults(func=tpu_command_launcher )
return parser
def tpu_command_launcher(args ):
    defaults = None
    # Get the default from the config file if it exists.
    if args.config_file is not None or os.path.isfile(default_config_file ):
        defaults = load_config_from_file(args.config_file )
        if not args.command_file and defaults.command_file is not None and not args.command:
            args.command_file = defaults.command_file
        if not args.command and defaults.commands is not None:
            args.command = defaults.commands
        if not args.tpu_name:
            args.tpu_name = defaults.tpu_name
        if not args.tpu_zone:
            args.tpu_zone = defaults.tpu_zone
    if args.accelerate_version == "dev":
        args.accelerate_version = 'git+https://github.com/huggingface/accelerate.git'
    elif args.accelerate_version == "latest":
        args.accelerate_version = 'accelerate -U'
    elif isinstance(parse(args.accelerate_version ) , Version ):
        args.accelerate_version = f'accelerate=={args.accelerate_version}'
    if not args.command_file and not args.command:
        raise ValueError('You must specify either a command file or a command to run on the pod.' )
    if args.command_file:
        with open(args.command_file , 'r' ) as f:
            args.command = [f.read().splitlines()]
    # To turn list of lists into list of strings
    if isinstance(args.command[0] , list ):
        args.command = [line for cmd in args.command for line in cmd]
    # Default to the shared folder and install accelerate
    new_cmd = ['cd /usr/share']
    if args.install_accelerate:
        new_cmd += [f'pip install {args.accelerate_version}']
    new_cmd += args.command
    args.command = '; '.join(new_cmd )
    # Then send it to gcloud
    # Eventually try to use google-api-core to do this instead of subprocess
    cmd = ['gcloud']
    if args.use_alpha:
        cmd += ["alpha"]
    cmd += [
        "compute",
        "tpus",
        "tpu-vm",
        "ssh",
        args.tpu_name,
        "--zone",
        args.tpu_zone,
        "--command",
        args.command,
        "--worker",
        "all",
    ]
    if args.debug:
        print(f'Running {" ".join(cmd )}' )
        return
    subprocess.run(cmd )
    print('Successfully setup pod.' )


def main():
    parser = tpu_command_parser()
    args = parser.parse_args()
    tpu_command_launcher(args )
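# Example invocation (hypothetical TPU name and zone); --debug prints the assembled
# gcloud command instead of running it:
#
#     accelerate tpu-config --tpu_name my-tpu --tpu_zone us-central1-a \
#         --command "echo hello" --debug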
| 366
|
"""simple docstring"""
alphabet_size = 256
# Modulus to hash a string
modulus = 100_0003


def rabin_karp(pattern: str, text: str) -> bool:
    p_len = len(pattern)
    t_len = len(text)
    if p_len > t_len:
        return False
    p_hash = 0
    text_hash = 0
    modulus_power = 1
    # Calculating the hash of pattern and substring of text
    for i in range(p_len):
        p_hash = (ord(pattern[i]) + p_hash * alphabet_size) % modulus
        text_hash = (ord(text[i]) + text_hash * alphabet_size) % modulus
        if i == p_len - 1:
            continue
        modulus_power = (modulus_power * alphabet_size) % modulus
    for i in range(0, t_len - p_len + 1):
        if text_hash == p_hash and text[i : i + p_len] == pattern:
            return True
        if i == t_len - p_len:
            continue
        # Calculate the https://en.wikipedia.org/wiki/Rolling_hash
        text_hash = (
            (text_hash - ord(text[i]) * modulus_power) * alphabet_size
            + ord(text[i + p_len])
        ) % modulus
    return False


def test_rabin_karp() -> None:
    # Test 1)
    pattern = 'abc1abc12'
    text_1 = 'alskfjaldsabc1abc1abc12k23adsfabcabc'
    text_2 = 'alskfjaldsk23adsfabcabc'
    assert rabin_karp(pattern, text_1) and not rabin_karp(pattern, text_2)
    # Test 2)
    pattern = 'ABABX'
    text = 'ABABZABABYABABX'
    assert rabin_karp(pattern, text)
    # Test 3)
    pattern = 'AAAB'
    text = 'ABAAAAAB'
    assert rabin_karp(pattern, text)
    # Test 4)
    pattern = 'abcdabcy'
    text = 'abcxabcdabxabcdabcdabcy'
    assert rabin_karp(pattern, text)
    # Test 5)
    pattern = 'Lü'
    text = 'Lüsai'
    assert rabin_karp(pattern, text)
    pattern = 'Lue'
    assert not rabin_karp(pattern, text)
    print('Success.')
if __name__ == "__main__":
test_rabin_karp()
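# For intuition, the rolling-hash update used in rabin_karp must agree with hashing
# the next window from scratch; a standalone check on a toy window (reuses the
# module-level alphabet_size/modulus):
def _sketch_rolling_hash_check():
    def full_hash(s):
        h = 0
        for ch in s:
            h = (ord(ch) + h * alphabet_size) % modulus
        return h

    text, p_len = 'abcd', 3
    modulus_power = pow(alphabet_size, p_len - 1, modulus)
    rolled = ((full_hash(text[:p_len]) - ord(text[0]) * modulus_power) * alphabet_size + ord(text[p_len])) % modulus
    assert rolled == full_hash(text[1 : 1 + p_len])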
| 312
| 0
|
"""simple docstring"""
import argparse
import intel_extension_for_pytorch as ipex
import torch
from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline
parser = argparse.ArgumentParser('''Stable Diffusion script with intel optimization''', add_help=False)
parser.add_argument('''--dpm''', action='''store_true''', help='''Enable DPMSolver or not''')
parser.add_argument('''--steps''', default=None, type=int, help='''Num inference steps''')
args = parser.parse_args()
device = '''cpu'''
prompt = '''a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings'''
model_id = '''path-to-your-trained-model'''
pipe = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
    pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to(device)
# to channels last
pipe.unet = pipe.unet.to(memory_format=torch.channels_last)
pipe.vae = pipe.vae.to(memory_format=torch.channels_last)
pipe.text_encoder = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
    pipe.safety_checker = pipe.safety_checker.to(memory_format=torch.channels_last)
# optimize with ipex
sample = torch.randn(2, 4, 64, 64)
timestep = torch.rand(1) * 999
encoder_hidden_status = torch.randn(2, 77, 768)
input_example = (sample, timestep, encoder_hidden_status)
try:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True, sample_input=input_example)
except Exception:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True)
pipe.vae = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloat16, inplace=True)
pipe.text_encoder = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloat16, inplace=True)
if pipe.requires_safety_checker:
    pipe.safety_checker = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloat16, inplace=True)
# compute
seed = 666
generator = torch.Generator(device).manual_seed(seed)
generate_kwargs = {'''generator''': generator}
if args.steps is not None:
    generate_kwargs['''num_inference_steps'''] = args.steps
with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
    image = pipe(prompt, **generate_kwargs).images[0]
# save image
image.save('''generated.png''')
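# For reference, the channels-last conversion used above can be verified on a toy
# tensor (a standalone sketch; NCHW data keeps its shape but changes memory layout):
def _sketch_channels_last_check():
    demo = torch.randn(1, 3, 8, 8).to(memory_format=torch.channels_last)
    assert demo.is_contiguous(memory_format=torch.channels_last)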
| 367
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__UpperCamelCase = logging.get_logger(__name__)
__UpperCamelCase = {
'''microsoft/resnet-50''': '''https://huggingface.co/microsoft/resnet-50/blob/main/config.json''',
}
class ResNetConfig(BackboneConfigMixin , PretrainedConfig ):
    model_type = "resnet"
    layer_types = ["basic", "bottleneck"]
def __init__( self, lowerCAmelCase__=3, lowerCAmelCase__=64, lowerCAmelCase__=[256, 512, 1024, 2048], lowerCAmelCase__=[3, 4, 6, 3], lowerCAmelCase__="bottleneck", lowerCAmelCase__="relu", lowerCAmelCase__=False, lowerCAmelCase__=None, lowerCAmelCase__=None, **lowerCAmelCase__, ) -> Dict:
super().__init__(**lowerCAmelCase__)
if layer_type not in self.layer_types:
raise ValueError(f'layer_type={layer_type} is not one of {",".join(self.layer_types)}')
snake_case_ = num_channels
snake_case_ = embedding_size
snake_case_ = hidden_sizes
snake_case_ = depths
snake_case_ = layer_type
snake_case_ = hidden_act
snake_case_ = downsample_in_first_stage
snake_case_ = ['stem'] + [f'stage{idx}' for idx in range(1, len(lowerCAmelCase__) + 1)]
snake_case_ , snake_case_ = get_aligned_output_features_output_indices(
out_features=lowerCAmelCase__, out_indices=lowerCAmelCase__, stage_names=self.stage_names)
class ResNetOnnxConfig(OnnxConfig ):
    torch_onnx_minimum_version = version.parse("1.11" )
@property
def a_ ( self) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
])
@property
def a_ ( self) -> float:
return 1e-3
| 312
| 0
|
"""simple docstring"""
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class DistilBertTokenizationTest(BertTokenizationTest ):
    tokenizer_class = DistilBertTokenizer
    rust_tokenizer_class = DistilBertTokenizerFast
    test_rust_tokenizer = True
@slow
    def test_sequence_builders(self):
        tokenizer = DistilBertTokenizer.from_pretrained('distilbert-base-uncased')
        text = tokenizer.encode('sequence builders', add_special_tokens=False)
        text_2 = tokenizer.encode('multi-sequence build', add_special_tokens=False)
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [
            tokenizer.sep_token_id
        ]
| 368
|
"""simple docstring"""
import unittest
from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('''fixtures/spiece.model''')
@require_sentencepiece
@require_tokenizers
class DebertaVaTokenizationTest(TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = DebertaVaTokenizer
    rust_tokenizer_class = DebertaVaTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB, unk_token='<unk>')
        tokenizer.save_pretrained(self.tmpdirname)

    def get_input_output_texts(self, tokenizer):
        input_text = 'this is a test'
        output_text = 'this is a test'
        return input_text, output_text

    def test_convert_token_and_id(self):
        token = '<pad>'
        token_id = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], '<pad>')
        self.assertEqual(vocab_keys[1], '<unk>')
        self.assertEqual(vocab_keys[-1], '[PAD]')
        self.assertEqual(len(vocab_keys), 3_0001)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 3_0000)
    def test_do_lower_case(self):
        # fmt: off
        sequence = ' \tHeLLo!how \n Are yoU? '
        tokens_target = ['▁hello', '!', 'how', '▁are', '▁you', '?']
        # fmt: on
        tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB, do_lower_case=True)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)
        rust_tokenizer = DebertaVaTokenizerFast(SAMPLE_VOCAB, do_lower_case=True)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)
@unittest.skip('There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.')
def a_ ( self) -> str:
pass
@unittest.skip('There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.')
def a_ ( self) -> List[Any]:
pass
def a_ ( self) -> str:
# fmt: off
snake_case_ = 'I was born in 92000, and this is falsé.'
snake_case_ = ['▁', '<unk>', '▁was', '▁born', '▁in', '▁9', '2000', '▁', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '▁', '.', ]
# fmt: on
snake_case_ = DebertaVaTokenizer(lowerCAmelCase__, split_by_punct=lowerCAmelCase__)
snake_case_ = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__))
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = DebertaVaTokenizerFast(lowerCAmelCase__, split_by_punct=lowerCAmelCase__)
snake_case_ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__))
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
def a_ ( self) -> List[Any]:
# fmt: off
snake_case_ = 'I was born in 92000, and this is falsé.'
snake_case_ = ['▁i', '▁was', '▁born', '▁in', '▁9', '2000', '▁', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '▁', '.', ]
# fmt: on
snake_case_ = DebertaVaTokenizer(lowerCAmelCase__, do_lower_case=lowerCAmelCase__, split_by_punct=lowerCAmelCase__)
snake_case_ = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__))
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = DebertaVaTokenizerFast(lowerCAmelCase__, do_lower_case=lowerCAmelCase__, split_by_punct=lowerCAmelCase__)
snake_case_ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__))
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
def a_ ( self) -> Dict:
# fmt: off
snake_case_ = 'I was born in 92000, and this is falsé.'
snake_case_ = ['▁i', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '.', ]
# fmt: on
snake_case_ = DebertaVaTokenizer(lowerCAmelCase__, do_lower_case=lowerCAmelCase__, split_by_punct=lowerCAmelCase__)
snake_case_ = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__))
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = DebertaVaTokenizerFast(lowerCAmelCase__, do_lower_case=lowerCAmelCase__, split_by_punct=lowerCAmelCase__)
snake_case_ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__))
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
def a_ ( self) -> Tuple:
# fmt: off
snake_case_ = 'I was born in 92000, and this is falsé.'
snake_case_ = ['▁', '<unk>', '▁was', '▁born', '▁in', '▁9', '2000', '▁', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '▁', '.', ]
# fmt: on
snake_case_ = DebertaVaTokenizer(lowerCAmelCase__, do_lower_case=lowerCAmelCase__, split_by_punct=lowerCAmelCase__)
snake_case_ = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__))
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = DebertaVaTokenizerFast(lowerCAmelCase__, do_lower_case=lowerCAmelCase__, split_by_punct=lowerCAmelCase__)
snake_case_ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__))
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
def a_ ( self) -> Any:
# fmt: off
snake_case_ = ' \tHeLLo!how \n Are yoU? '
snake_case_ = ['▁', '<unk>', 'e', '<unk>', 'o', '!', 'how', '▁', '<unk>', 're', '▁yo', '<unk>', '?']
# fmt: on
snake_case_ = DebertaVaTokenizer(lowerCAmelCase__, do_lower_case=lowerCAmelCase__, split_by_punct=lowerCAmelCase__)
snake_case_ = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__))
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = DebertaVaTokenizerFast(lowerCAmelCase__, do_lower_case=lowerCAmelCase__, split_by_punct=lowerCAmelCase__)
snake_case_ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__))
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
def a_ ( self) -> Dict:
snake_case_ = self.get_tokenizer()
snake_case_ = self.get_rust_tokenizer()
snake_case_ = 'I was born in 92000, and this is falsé.'
snake_case_ = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__))
snake_case_ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__))
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__)
snake_case_ = rust_tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = self.get_rust_tokenizer()
snake_case_ = tokenizer.encode(lowerCAmelCase__)
snake_case_ = rust_tokenizer.encode(lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
def a_ ( self) -> int:
snake_case_ = 'This is a test'
snake_case_ = [13, 1, 4398, 25, 21, 1289]
snake_case_ = ['▁', 'T', 'his', '▁is', '▁a', '▁test']
snake_case_ = ['▁', '<unk>', 'his', '▁is', '▁a', '▁test']
snake_case_ = DebertaVaTokenizer(lowerCAmelCase__, keep_accents=lowerCAmelCase__)
snake_case_ = DebertaVaTokenizerFast(lowerCAmelCase__, keep_accents=lowerCAmelCase__)
snake_case_ = tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = tokenizer.tokenize(lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = tokenizer.convert_ids_to_tokens(lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = rust_tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = rust_tokenizer.tokenize(lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = rust_tokenizer.convert_ids_to_tokens(lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
# fmt: off
snake_case_ = 'I was born in 92000, and this is falsé.'
snake_case_ = [13, 1, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9]
snake_case_ = ['▁', 'I', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', 'é', '.', ]
snake_case_ = ['▁', '<unk>', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '.', ]
# fmt: on
snake_case_ = tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = tokenizer.tokenize(lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = tokenizer.convert_ids_to_tokens(lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = rust_tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = rust_tokenizer.tokenize(lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = rust_tokenizer.convert_ids_to_tokens(lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
def a_ ( self) -> Tuple:
snake_case_ = DebertaVaTokenizer(lowerCAmelCase__)
snake_case_ = tokenizer.encode('sequence builders')
snake_case_ = tokenizer.encode('multi-sequence build')
snake_case_ = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase__)
snake_case_ = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase__, lowerCAmelCase__)
self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id], lowerCAmelCase__)
self.assertEqual(
[tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id], lowerCAmelCase__, )
@slow
def a_ ( self) -> Union[str, Any]:
# fmt: off
snake_case_ = {'input_ids': [[1, 3_9867, 36, 1_9390, 486, 27, 3_5052, 8_1436, 18, 6_0685, 1225, 7, 3_5052, 8_1436, 18, 9367, 1_6899, 18, 1_5937, 53, 594, 773, 18, 1_6287, 3_0465, 36, 1_5937, 6, 4_1139, 38, 3_6979, 6_0763, 191, 6, 3_4132, 99, 6, 5_0538, 390, 4_3230, 6, 3_4132, 2779, 2_0850, 14, 699, 1072, 1194, 36, 382, 1_0901, 53, 7, 699, 1072, 2084, 36, 2_0422, 630, 53, 19, 105, 3049, 1896, 1053, 1_6899, 1506, 11, 3_7978, 4243, 7, 1237, 3_1869, 200, 1_6566, 654, 6, 3_5052, 8_1436, 7, 5_5630, 1_3593, 4, 2], [1, 26, 1_5011, 13, 667, 8, 1053, 18, 2_3611, 1237, 7_2356, 1_2820, 34, 10_4134, 1209, 35, 1_3313, 6627, 21, 202, 347, 7, 164, 2399, 11, 46, 4485, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 1232, 2864, 1_5785, 1_4951, 105, 5, 8581, 1250, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowerCAmelCase__, model_name='microsoft/deberta-v2-xlarge', revision='ad6e42c1532ddf3a15c39246b63f5559d558b670', )
| 312
| 0
|
"""simple docstring"""
def UpperCAmelCase ( UpperCAmelCase = 1000 ) -> int:
snake_case_ = 2**power
snake_case_ = 0
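    # Repeatedly peel off the last decimal digit of n and accumulate it into r.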
while n:
snake_case_ , snake_case_ = r + n % 10, n // 10
return r
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 369
|
"""simple docstring"""
# flake8: noqa
# Lint as: python3
from typing import Dict, List, Optional, Type
from .. import config
from ..utils import logging
from .formatting import (
ArrowFormatter,
CustomFormatter,
Formatter,
PandasFormatter,
PythonFormatter,
TensorFormatter,
format_table,
query_table,
)
from .np_formatter import NumpyFormatter
__UpperCamelCase = logging.get_logger(__name__)
__UpperCamelCase = {}
__UpperCamelCase = {}
__UpperCamelCase = {}
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = None , ) -> Optional[Any]:
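    # Associate the formatter class with format_type and each of its aliases, warning when an existing registration is overwritten.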
snake_case_ = aliases if aliases is not None else []
if format_type in _FORMAT_TYPES:
logger.warning(
f'Overwriting format type \'{format_type}\' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})' )
snake_case_ = formatter_cls
for alias in set(aliases + [format_type] ):
if alias in _FORMAT_TYPES_ALIASES:
logger.warning(
f'Overwriting format type alias \'{alias}\' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})' )
snake_case_ = format_type
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = None ) -> Union[str, Any]:
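    # Record the error so that later requests for a format whose optional backend (torch/tf/jax) is missing raise a helpful message.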
snake_case_ = aliases if aliases is not None else []
for alias in set(aliases + [format_type] ):
snake_case_ = unavailable_error
# Here we define all the available formatting functions that can be used by `Dataset.set_format`
_register_formatter(PythonFormatter, None, aliases=['''python'''])
_register_formatter(ArrowFormatter, '''arrow''', aliases=['''pa''', '''pyarrow'''])
_register_formatter(NumpyFormatter, '''numpy''', aliases=['''np'''])
_register_formatter(PandasFormatter, '''pandas''', aliases=['''pd'''])
_register_formatter(CustomFormatter, '''custom''')
if config.TORCH_AVAILABLE:
from .torch_formatter import TorchFormatter
_register_formatter(TorchFormatter, '''torch''', aliases=['''pt''', '''pytorch'''])
else:
__UpperCamelCase = ValueError('''PyTorch needs to be installed to be able to return PyTorch tensors.''')
_register_unavailable_formatter(_torch_error, '''torch''', aliases=['''pt''', '''pytorch'''])
if config.TF_AVAILABLE:
from .tf_formatter import TFFormatter
_register_formatter(TFFormatter, '''tensorflow''', aliases=['''tf'''])
else:
__UpperCamelCase = ValueError('''Tensorflow needs to be installed to be able to return Tensorflow tensors.''')
_register_unavailable_formatter(_tf_error, '''tensorflow''', aliases=['''tf'''])
if config.JAX_AVAILABLE:
from .jax_formatter import JaxFormatter
_register_formatter(JaxFormatter, '''jax''', aliases=[])
else:
__UpperCamelCase = ValueError('''JAX needs to be installed to be able to return JAX arrays.''')
_register_unavailable_formatter(_jax_error, '''jax''', aliases=[])
def UpperCAmelCase ( UpperCAmelCase ) -> Optional[str]:
if format_type in _FORMAT_TYPES_ALIASES:
return _FORMAT_TYPES_ALIASES[format_type]
else:
return format_type
def UpperCAmelCase ( UpperCAmelCase , **UpperCAmelCase ) -> Formatter:
snake_case_ = get_format_type_from_alias(UpperCAmelCase )
if format_type in _FORMAT_TYPES:
return _FORMAT_TYPES[format_type](**UpperCAmelCase )
if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE:
raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type]
else:
raise ValueError(
f'Return type should be None or selected in {list(type for type in _FORMAT_TYPES.keys() if type is not None )}, but got \'{format_type}\'' )
| 312
| 0
|
"""simple docstring"""
def UpperCAmelCase ( UpperCAmelCase = 1000 ) -> int:
return sum(2 * a * ((a - 1) // 2) for a in range(3 , n + 1 ) )
if __name__ == "__main__":
print(solution())
| 370
|
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mvp import MvpTokenizer
__UpperCamelCase = logging.get_logger(__name__)
__UpperCamelCase = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
# See all MVP models at https://huggingface.co/models?filter=mvp
__UpperCamelCase = {
'''vocab_file''': {
'''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/vocab.json''',
},
'''added_tokens.json''': {
'''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/added_tokens.json''',
},
'''merges_file''': {
'''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/tokenizer.json''',
},
}
__UpperCamelCase = {
'''RUCAIBox/mvp''': 1024,
}
class UpperCamelCase ( lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE_ = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE_ = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE_ = ["input_ids", "attention_mask"]
SCREAMING_SNAKE_CASE_ = MvpTokenizer
def __init__( self, lowerCAmelCase__=None, lowerCAmelCase__=None, lowerCAmelCase__=None, lowerCAmelCase__="replace", lowerCAmelCase__="<s>", lowerCAmelCase__="</s>", lowerCAmelCase__="</s>", lowerCAmelCase__="<s>", lowerCAmelCase__="<unk>", lowerCAmelCase__="<pad>", lowerCAmelCase__="<mask>", lowerCAmelCase__=False, lowerCAmelCase__=True, **lowerCAmelCase__, ) -> Union[str, Any]:
super().__init__(
lowerCAmelCase__, lowerCAmelCase__, tokenizer_file=lowerCAmelCase__, errors=lowerCAmelCase__, bos_token=lowerCAmelCase__, eos_token=lowerCAmelCase__, sep_token=lowerCAmelCase__, cls_token=lowerCAmelCase__, unk_token=lowerCAmelCase__, pad_token=lowerCAmelCase__, mask_token=lowerCAmelCase__, add_prefix_space=lowerCAmelCase__, trim_offsets=lowerCAmelCase__, **lowerCAmelCase__, )
snake_case_ = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
if pre_tok_state.get('add_prefix_space', lowerCAmelCase__) != add_prefix_space:
snake_case_ = getattr(lowerCAmelCase__, pre_tok_state.pop('type'))
snake_case_ = add_prefix_space
snake_case_ = pre_tok_class(**lowerCAmelCase__)
snake_case_ = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
snake_case_ = 'post_processor'
snake_case_ = getattr(self.backend_tokenizer, lowerCAmelCase__, lowerCAmelCase__)
if tokenizer_component_instance:
snake_case_ = json.loads(tokenizer_component_instance.__getstate__())
# The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
if "sep" in state:
snake_case_ = tuple(state['sep'])
if "cls" in state:
snake_case_ = tuple(state['cls'])
snake_case_ = False
if state.get('add_prefix_space', lowerCAmelCase__) != add_prefix_space:
snake_case_ = add_prefix_space
snake_case_ = True
if state.get('trim_offsets', lowerCAmelCase__) != trim_offsets:
snake_case_ = trim_offsets
snake_case_ = True
if changes_to_apply:
snake_case_ = getattr(lowerCAmelCase__, state.pop('type'))
snake_case_ = component_class(**lowerCAmelCase__)
setattr(self.backend_tokenizer, lowerCAmelCase__, lowerCAmelCase__)
@property
def a_ ( self) -> str:
if self._mask_token is None:
if self.verbose:
logger.error('Using mask_token, but it is not set yet.')
return None
return str(self._mask_token)
@mask_token.setter
def a_ ( self, lowerCAmelCase__) -> Any:
snake_case_ = AddedToken(lowerCAmelCase__, lstrip=lowerCAmelCase__, rstrip=lowerCAmelCase__) if isinstance(lowerCAmelCase__, lowerCAmelCase__) else value
snake_case_ = value
def a_ ( self, *lowerCAmelCase__, **lowerCAmelCase__) -> BatchEncoding:
snake_case_ = kwargs.get('is_split_into_words', lowerCAmelCase__)
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
'to use it with pretokenized inputs.')
return super()._batch_encode_plus(*lowerCAmelCase__, **lowerCAmelCase__)
def a_ ( self, *lowerCAmelCase__, **lowerCAmelCase__) -> BatchEncoding:
snake_case_ = kwargs.get('is_split_into_words', lowerCAmelCase__)
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
'to use it with pretokenized inputs.')
return super()._encode_plus(*lowerCAmelCase__, **lowerCAmelCase__)
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None) -> Tuple[str]:
snake_case_ = self._tokenizer.model.save(lowerCAmelCase__, name=lowerCAmelCase__)
return tuple(lowerCAmelCase__)
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__=None) -> str:
snake_case_ = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None) -> List[int]:
snake_case_ = [self.sep_token_id]
snake_case_ = [self.cls_token_id]
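        # MVP does not make use of token type ids, so a zero mask of the right length is returned.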
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
| 312
| 0
|
"""simple docstring"""
import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput
class UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE_ = 1
@register_to_config
def __init__( self, lowerCAmelCase__ = 1000, lowerCAmelCase__ = None) -> Dict:
# set `betas`, `alphas`, `timesteps`
self.set_timesteps(lowerCAmelCase__)
# standard deviation of the initial noise distribution
snake_case_ = 1.0
        # For now we only support F-PNDM, i.e. the Runge-Kutta method
        # For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
        # mainly at formulas (9), (12), (13) and Algorithm 2.
snake_case_ = 4
# running values
snake_case_ = []
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None) -> int:
snake_case_ = num_inference_steps
snake_case_ = torch.linspace(1, 0, num_inference_steps + 1)[:-1]
snake_case_ = torch.cat([steps, torch.tensor([0.0])])
if self.config.trained_betas is not None:
snake_case_ = torch.tensor(self.config.trained_betas, dtype=torch.floataa)
else:
snake_case_ = torch.sin(steps * math.pi / 2) ** 2
snake_case_ = (1.0 - self.betas**2) ** 0.5
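        # alphas and betas lie on the unit circle (alphas**2 + betas**2 == 1), so the timesteps below are the normalized angles recovered with the two-argument arctangent.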
snake_case_ = (torch.atana(self.betas, self.alphas) / math.pi * 2)[:-1]
snake_case_ = timesteps.to(lowerCAmelCase__)
snake_case_ = []
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__ = True, ) -> Union[SchedulerOutput, Tuple]:
if self.num_inference_steps is None:
raise ValueError(
'Number of inference steps is \'None\', you need to run \'set_timesteps\' after creating the scheduler')
snake_case_ = (self.timesteps == timestep).nonzero().item()
snake_case_ = timestep_index + 1
snake_case_ = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
self.ets.append(lowerCAmelCase__)
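        # Combine the stored model outputs using the standard Adams-Bashforth linear multistep coefficients of orders 1-4 (1; 3/2, -1/2; 23/12, -16/12, 5/12; 55/24, -59/24, 37/24, -9/24).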
if len(self.ets) == 1:
snake_case_ = self.ets[-1]
elif len(self.ets) == 2:
snake_case_ = (3 * self.ets[-1] - self.ets[-2]) / 2
elif len(self.ets) == 3:
snake_case_ = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
else:
snake_case_ = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])
snake_case_ = self._get_prev_sample(lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__)
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=lowerCAmelCase__)
def a_ ( self, lowerCAmelCase__, *lowerCAmelCase__, **lowerCAmelCase__) -> torch.FloatTensor:
return sample
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__) -> Any:
snake_case_ = self.alphas[timestep_index]
snake_case_ = self.betas[timestep_index]
snake_case_ = self.alphas[prev_timestep_index]
snake_case_ = self.betas[prev_timestep_index]
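        # Recover the predicted clean sample from the noise estimate, then re-mix it with the noise using the previous timestep's alpha and sigma.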
snake_case_ = (sample - sigma * ets) / max(lowerCAmelCase__, 1e-8)
snake_case_ = next_alpha * pred + ets * next_sigma
return prev_sample
def __len__( self) -> Tuple:
return self.config.num_train_timesteps
| 371
|
"""simple docstring"""
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionImgaImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class UpperCamelCase ( lowerCAmelCase__ , unittest.TestCase ):
SCREAMING_SNAKE_CASE_ = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"
def a_ ( self, lowerCAmelCase__=0) -> List[Any]:
snake_case_ = floats_tensor((1, 3, 128, 128), rng=random.Random(lowerCAmelCase__))
snake_case_ = np.random.RandomState(lowerCAmelCase__)
snake_case_ = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'generator': generator,
'num_inference_steps': 3,
'strength': 0.75,
'guidance_scale': 7.5,
'output_type': 'numpy',
}
return inputs
def a_ ( self) -> Optional[Any]:
snake_case_ = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
pipe.set_progress_bar_config(disable=lowerCAmelCase__)
snake_case_ = self.get_dummy_inputs()
snake_case_ = pipe(**lowerCAmelCase__).images
snake_case_ = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 128, 128, 3)
snake_case_ = np.array([0.69643, 0.58484, 0.50314, 0.58760, 0.55368, 0.59643, 0.51529, 0.41217, 0.49087])
assert np.abs(image_slice - expected_slice).max() < 1e-1
def a_ ( self) -> List[str]:
snake_case_ = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
snake_case_ = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=lowerCAmelCase__)
pipe.set_progress_bar_config(disable=lowerCAmelCase__)
snake_case_ = self.get_dummy_inputs()
snake_case_ = pipe(**lowerCAmelCase__).images
snake_case_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
snake_case_ = np.array([0.61737, 0.54642, 0.53183, 0.54465, 0.52742, 0.60525, 0.49969, 0.40655, 0.48154])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
def a_ ( self) -> str:
snake_case_ = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
snake_case_ = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
pipe.set_progress_bar_config(disable=lowerCAmelCase__)
# warmup pass to apply optimizations
snake_case_ = pipe(**self.get_dummy_inputs())
snake_case_ = self.get_dummy_inputs()
snake_case_ = pipe(**lowerCAmelCase__).images
snake_case_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
snake_case_ = np.array([0.52761, 0.59977, 0.49033, 0.49619, 0.54282, 0.50311, 0.47600, 0.40918, 0.45203])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
def a_ ( self) -> int:
snake_case_ = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
snake_case_ = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
pipe.set_progress_bar_config(disable=lowerCAmelCase__)
snake_case_ = self.get_dummy_inputs()
snake_case_ = pipe(**lowerCAmelCase__).images
snake_case_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
snake_case_ = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
def a_ ( self) -> Dict:
snake_case_ = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
snake_case_ = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
pipe.set_progress_bar_config(disable=lowerCAmelCase__)
snake_case_ = self.get_dummy_inputs()
snake_case_ = pipe(**lowerCAmelCase__).images
snake_case_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
snake_case_ = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
def a_ ( self) -> Dict:
snake_case_ = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider')
snake_case_ = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe.set_progress_bar_config(disable=lowerCAmelCase__)
snake_case_ = self.get_dummy_inputs()
snake_case_ = pipe(**lowerCAmelCase__).images
snake_case_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
snake_case_ = np.array([0.65331, 0.58277, 0.48204, 0.56059, 0.53665, 0.56235, 0.50969, 0.40009, 0.46552])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class UpperCamelCase ( unittest.TestCase ):
@property
def a_ ( self) -> int:
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def a_ ( self) -> str:
snake_case_ = ort.SessionOptions()
snake_case_ = False
return options
def a_ ( self) -> Any:
snake_case_ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/img2img/sketch-mountains-input.jpg')
snake_case_ = init_image.resize((768, 512))
# using the PNDM scheduler by default
snake_case_ = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4', revision='onnx', safety_checker=lowerCAmelCase__, feature_extractor=lowerCAmelCase__, provider=self.gpu_provider, sess_options=self.gpu_options, )
pipe.set_progress_bar_config(disable=lowerCAmelCase__)
snake_case_ = 'A fantasy landscape, trending on artstation'
snake_case_ = np.random.RandomState(0)
snake_case_ = pipe(
prompt=lowerCAmelCase__, image=lowerCAmelCase__, strength=0.75, guidance_scale=7.5, num_inference_steps=10, generator=lowerCAmelCase__, output_type='np', )
snake_case_ = output.images
snake_case_ = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 768, 3)
snake_case_ = np.array([0.4909, 0.5059, 0.5372, 0.4623, 0.4876, 0.5049, 0.4820, 0.4956, 0.5019])
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
def a_ ( self) -> List[Any]:
snake_case_ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/img2img/sketch-mountains-input.jpg')
snake_case_ = init_image.resize((768, 512))
snake_case_ = LMSDiscreteScheduler.from_pretrained(
'runwayml/stable-diffusion-v1-5', subfolder='scheduler', revision='onnx')
snake_case_ = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5', revision='onnx', scheduler=lowerCAmelCase__, safety_checker=lowerCAmelCase__, feature_extractor=lowerCAmelCase__, provider=self.gpu_provider, sess_options=self.gpu_options, )
pipe.set_progress_bar_config(disable=lowerCAmelCase__)
snake_case_ = 'A fantasy landscape, trending on artstation'
snake_case_ = np.random.RandomState(0)
snake_case_ = pipe(
prompt=lowerCAmelCase__, image=lowerCAmelCase__, strength=0.75, guidance_scale=7.5, num_inference_steps=20, generator=lowerCAmelCase__, output_type='np', )
snake_case_ = output.images
snake_case_ = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 768, 3)
snake_case_ = np.array([0.8043, 0.926, 0.9581, 0.8119, 0.8954, 0.913, 0.7209, 0.7463, 0.7431])
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
| 312
| 0
|
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def UpperCAmelCase ( ) -> Dict:
snake_case_ = ArgumentParser(
description=(
'PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes'
) )
# Optional arguments for the launch helper
parser.add_argument('--num_cores' , type=UpperCAmelCase , default=1 , help='Number of TPU cores to use (1 or 8).' )
# positional
parser.add_argument(
'training_script' , type=UpperCAmelCase , help=(
'The full path to the single TPU training '
'program/script to be launched in parallel, '
'followed by all the arguments for the '
'training script'
) , )
# rest from the training program
parser.add_argument('training_script_args' , nargs=UpperCAmelCase )
return parser.parse_args()
def UpperCAmelCase ( ) -> Tuple:
snake_case_ = parse_args()
# Import training_script as a module.
snake_case_ = Path(args.training_script )
sys.path.append(str(script_fpath.parent.resolve() ) )
snake_case_ = script_fpath.stem
snake_case_ = importlib.import_module(UpperCAmelCase )
# Patch sys.argv
snake_case_ = [args.training_script] + args.training_script_args + ['--tpu_num_cores', str(args.num_cores )]
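    # xmp.spawn forks num_cores worker processes, each of which runs mod._mp_fn with the patched argv.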
xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores )
if __name__ == "__main__":
main()
| 350
|
"""simple docstring"""
import io
import math
from typing import Dict, Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import convert_to_rgb, normalize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
get_image_size,
infer_channel_dimension_format,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_vision_available, logging
from ...utils.import_utils import requires_backends
if is_vision_available():
import textwrap
from PIL import Image, ImageDraw, ImageFont
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
__UpperCamelCase = False
__UpperCamelCase = logging.get_logger(__name__)
__UpperCamelCase = '''ybelkada/fonts'''
def UpperCAmelCase ( ) -> Dict:
if is_torch_available() and not is_torch_greater_or_equal_than_1_11:
raise ImportError(
f'You are using torch=={torch.__version__}, but torch>=1.11.0 is required to use '
'Pix2StructImageProcessor. Please upgrade torch.' )
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> str:
requires_backends(UpperCAmelCase , ['torch'] )
_check_torch_version()
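    # `unfold` slices the image into non-overlapping patch_height x patch_width windows; the reshape/permute below lays them out as a (rows, columns, patch_height * patch_width * channels) grid.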
snake_case_ = image_tensor.unsqueeze(0 )
snake_case_ = torch.nn.functional.unfold(UpperCAmelCase , (patch_height, patch_width) , stride=(patch_height, patch_width) )
snake_case_ = patches.reshape(image_tensor.size(0 ) , image_tensor.size(1 ) , UpperCAmelCase , UpperCAmelCase , -1 )
snake_case_ = patches.permute(0 , 4 , 2 , 3 , 1 ).reshape(
image_tensor.size(2 ) // patch_height , image_tensor.size(3 ) // patch_width , image_tensor.size(1 ) * patch_height * patch_width , )
return patches.unsqueeze(0 )
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase = 36 , UpperCAmelCase = "black" , UpperCAmelCase = "white" , UpperCAmelCase = 5 , UpperCAmelCase = 5 , UpperCAmelCase = 5 , UpperCAmelCase = 5 , UpperCAmelCase = None , UpperCAmelCase = None , ) -> Image.Image:
requires_backends(UpperCAmelCase , 'vision' )
# Add new lines so that each line is no more than 80 characters.
snake_case_ = textwrap.TextWrapper(width=80 )
snake_case_ = wrapper.wrap(text=UpperCAmelCase )
snake_case_ = '\n'.join(UpperCAmelCase )
if font_bytes is not None and font_path is None:
snake_case_ = io.BytesIO(UpperCAmelCase )
elif font_path is not None:
snake_case_ = font_path
else:
snake_case_ = hf_hub_download(UpperCAmelCase , 'Arial.TTF' )
snake_case_ = ImageFont.truetype(UpperCAmelCase , encoding='UTF-8' , size=UpperCAmelCase )
# Use a temporary canvas to determine the width and height in pixels when
# rendering the text.
snake_case_ = ImageDraw.Draw(Image.new('RGB' , (1, 1) , UpperCAmelCase ) )
snake_case_ , snake_case_ , snake_case_ , snake_case_ = temp_draw.textbbox((0, 0) , UpperCAmelCase , UpperCAmelCase )
# Create the actual image with a bit of padding around the text.
snake_case_ = text_width + left_padding + right_padding
snake_case_ = text_height + top_padding + bottom_padding
snake_case_ = Image.new('RGB' , (image_width, image_height) , UpperCAmelCase )
snake_case_ = ImageDraw.Draw(UpperCAmelCase )
draw.text(xy=(left_padding, top_padding) , text=UpperCAmelCase , fill=UpperCAmelCase , font=UpperCAmelCase )
return image
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ) -> Any:
requires_backends(UpperCAmelCase , 'vision' )
# Convert to PIL image if necessary
snake_case_ = to_pil_image(UpperCAmelCase )
snake_case_ = render_text(UpperCAmelCase , **UpperCAmelCase )
snake_case_ = max(header_image.width , image.width )
snake_case_ = int(image.height * (new_width / image.width) )
snake_case_ = int(header_image.height * (new_width / header_image.width) )
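    # Scale both images to a common width, then stack the rendered header above the original image.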
snake_case_ = Image.new('RGB' , (new_width, new_height + new_header_height) , 'white' )
new_image.paste(header_image.resize((new_width, new_header_height) ) , (0, 0) )
new_image.paste(image.resize((new_width, new_height) ) , (0, new_header_height) )
# Convert back to the original framework if necessary
snake_case_ = to_numpy_array(UpperCAmelCase )
if infer_channel_dimension_format(UpperCAmelCase ) == ChannelDimension.LAST:
snake_case_ = to_channel_dimension_format(UpperCAmelCase , ChannelDimension.LAST )
return new_image
class UpperCamelCase ( lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE_ = ["flattened_patches"]
def __init__( self, lowerCAmelCase__ = True, lowerCAmelCase__ = True, lowerCAmelCase__ = None, lowerCAmelCase__ = 2048, lowerCAmelCase__ = False, **lowerCAmelCase__, ) -> None:
super().__init__(**lowerCAmelCase__)
snake_case_ = patch_size if patch_size is not None else {'height': 16, 'width': 16}
snake_case_ = do_normalize
snake_case_ = do_convert_rgb
snake_case_ = max_patches
snake_case_ = is_vqa
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, **lowerCAmelCase__) -> np.ndarray:
requires_backends(self.extract_flattened_patches, 'torch')
_check_torch_version()
# convert to torch
snake_case_ = to_channel_dimension_format(lowerCAmelCase__, ChannelDimension.FIRST)
snake_case_ = torch.from_numpy(lowerCAmelCase__)
snake_case_ , snake_case_ = patch_size['height'], patch_size['width']
snake_case_ , snake_case_ = get_image_size(lowerCAmelCase__)
    # Maximize the scale such that the resized image yields at most max_patches patches.
snake_case_ = math.sqrt(max_patches * (patch_height / image_height) * (patch_width / image_width))
snake_case_ = max(min(math.floor(scale * image_height / patch_height), lowerCAmelCase__), 1)
snake_case_ = max(min(math.floor(scale * image_width / patch_width), lowerCAmelCase__), 1)
snake_case_ = max(num_feasible_rows * patch_height, 1)
snake_case_ = max(num_feasible_cols * patch_width, 1)
snake_case_ = torch.nn.functional.interpolate(
image.unsqueeze(0), size=(resized_height, resized_width), mode='bilinear', align_corners=lowerCAmelCase__, antialias=lowerCAmelCase__, ).squeeze(0)
# [1, rows, columns, patch_height * patch_width * image_channels]
snake_case_ = torch_extract_patches(lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = patches.shape
snake_case_ = patches_shape[1]
snake_case_ = patches_shape[2]
snake_case_ = patches_shape[3]
# [rows * columns, patch_height * patch_width * image_channels]
snake_case_ = patches.reshape([rows * columns, depth])
# [rows * columns, 1]
snake_case_ = torch.arange(lowerCAmelCase__).reshape([rows, 1]).repeat(1, lowerCAmelCase__).reshape([rows * columns, 1])
snake_case_ = torch.arange(lowerCAmelCase__).reshape([1, columns]).repeat(lowerCAmelCase__, 1).reshape([rows * columns, 1])
# Offset by 1 so the ids do not contain zeros, which represent padding.
row_ids += 1
col_ids += 1
# Prepare additional patch features.
# [rows * columns, 1]
snake_case_ = row_ids.to(torch.floataa)
snake_case_ = col_ids.to(torch.floataa)
# [rows * columns, 2 + patch_height * patch_width * image_channels]
snake_case_ = torch.cat([row_ids, col_ids, patches], -1)
# [max_patches, 2 + patch_height * patch_width * image_channels]
snake_case_ = torch.nn.functional.pad(lowerCAmelCase__, [0, 0, 0, max_patches - (rows * columns)]).float()
snake_case_ = to_numpy_array(lowerCAmelCase__)
return result
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None, **lowerCAmelCase__) -> np.ndarray:
if image.dtype == np.uinta:
snake_case_ = image.astype(np.floataa)
# take mean across the whole `image`
snake_case_ = np.mean(lowerCAmelCase__)
snake_case_ = np.std(lowerCAmelCase__)
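        # Guard against division by a near-zero std for (almost) constant images, mirroring tf.image.per_image_standardization.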
snake_case_ = max(lowerCAmelCase__, 1.0 / math.sqrt(np.prod(image.shape)))
return normalize(lowerCAmelCase__, mean=lowerCAmelCase__, std=lowerCAmelCase__, **lowerCAmelCase__)
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__ = ChannelDimension.FIRST, **lowerCAmelCase__, ) -> ImageInput:
snake_case_ = do_normalize if do_normalize is not None else self.do_normalize
snake_case_ = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
snake_case_ = patch_size if patch_size is not None else self.patch_size
snake_case_ = max_patches if max_patches is not None else self.max_patches
snake_case_ = self.is_vqa
if kwargs.get('data_format', lowerCAmelCase__) is not None:
            raise ValueError('data_format is not an accepted input as the outputs are flattened patches rather than images.')
snake_case_ = make_list_of_images(lowerCAmelCase__)
if not valid_images(lowerCAmelCase__):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.')
# PIL RGBA images are converted to RGB
if do_convert_rgb:
snake_case_ = [convert_to_rgb(lowerCAmelCase__) for image in images]
# All transformations expect numpy arrays.
snake_case_ = [to_numpy_array(lowerCAmelCase__) for image in images]
if is_vqa:
if header_text is None:
raise ValueError('A header text must be provided for VQA models.')
snake_case_ = kwargs.pop('font_bytes', lowerCAmelCase__)
snake_case_ = kwargs.pop('font_path', lowerCAmelCase__)
if isinstance(lowerCAmelCase__, lowerCAmelCase__):
snake_case_ = [header_text] * len(lowerCAmelCase__)
snake_case_ = [
render_header(lowerCAmelCase__, header_text[i], font_bytes=lowerCAmelCase__, font_path=lowerCAmelCase__)
for i, image in enumerate(lowerCAmelCase__)
]
if do_normalize:
snake_case_ = [self.normalize(image=lowerCAmelCase__) for image in images]
# convert to torch tensor and permute
snake_case_ = [
self.extract_flattened_patches(image=lowerCAmelCase__, max_patches=lowerCAmelCase__, patch_size=lowerCAmelCase__)
for image in images
]
# create attention mask in numpy
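        # Padded patch rows are all zeros, so a non-zero feature sum marks a real patch (the row/col ids were offset by 1 above to guarantee this).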
snake_case_ = [(image.sum(axis=-1) != 0).astype(np.floataa) for image in images]
snake_case_ = BatchFeature(
data={'flattened_patches': images, 'attention_mask': attention_masks}, tensor_type=lowerCAmelCase__)
return encoded_outputs
| 312
| 0
|
"""simple docstring"""
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401
deprecate(
'''stable diffusion controlnet''',
'''0.22.0''',
'''Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.''',
standard_warn=False,
stacklevel=3,
)
| 351
|
"""simple docstring"""
from math import pi
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> float:
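    # The arc spans angle/360 of the full circle, so its length is that fraction of the circumference 2 * pi * radius.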
return 2 * pi * radius * (angle / 360)
if __name__ == "__main__":
print(arc_length(90, 10))
| 312
| 0
|
"""simple docstring"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging
__UpperCamelCase = logging.get_logger(__name__)
__UpperCamelCase = {
'''CarlCochet/trajectory-transformer-halfcheetah-medium-v2''': (
'''https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json'''
),
# See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer
}
class UpperCamelCase ( lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE_ = "trajectory_transformer"
SCREAMING_SNAKE_CASE_ = ["past_key_values"]
SCREAMING_SNAKE_CASE_ = {
"hidden_size": "n_embd",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self, lowerCAmelCase__=100, lowerCAmelCase__=5, lowerCAmelCase__=1, lowerCAmelCase__=1, lowerCAmelCase__=249, lowerCAmelCase__=6, lowerCAmelCase__=17, lowerCAmelCase__=25, lowerCAmelCase__=4, lowerCAmelCase__=4, lowerCAmelCase__=128, lowerCAmelCase__=0.1, lowerCAmelCase__=0.1, lowerCAmelCase__=0.1, lowerCAmelCase__=0.0006, lowerCAmelCase__=512, lowerCAmelCase__=0.02, lowerCAmelCase__=1e-12, lowerCAmelCase__=1, lowerCAmelCase__=True, lowerCAmelCase__=1, lowerCAmelCase__=5_0256, lowerCAmelCase__=5_0256, **lowerCAmelCase__, ) -> Optional[Any]:
snake_case_ = vocab_size
snake_case_ = action_weight
snake_case_ = reward_weight
snake_case_ = value_weight
snake_case_ = max_position_embeddings
snake_case_ = block_size
snake_case_ = action_dim
snake_case_ = observation_dim
snake_case_ = transition_dim
snake_case_ = learning_rate
snake_case_ = n_layer
snake_case_ = n_head
snake_case_ = n_embd
snake_case_ = embd_pdrop
snake_case_ = attn_pdrop
snake_case_ = resid_pdrop
snake_case_ = initializer_range
snake_case_ = layer_norm_eps
snake_case_ = kaiming_initializer_range
snake_case_ = use_cache
super().__init__(pad_token_id=lowerCAmelCase__, bos_token_id=lowerCAmelCase__, eos_token_id=lowerCAmelCase__, **lowerCAmelCase__)
| 352
|
"""simple docstring"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging
__UpperCamelCase = logging.get_logger(__name__)
__UpperCamelCase = {
'''CarlCochet/trajectory-transformer-halfcheetah-medium-v2''': (
'''https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json'''
),
# See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer
}
class UpperCamelCase ( lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE_ = "trajectory_transformer"
SCREAMING_SNAKE_CASE_ = ["past_key_values"]
SCREAMING_SNAKE_CASE_ = {
"hidden_size": "n_embd",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self, lowerCAmelCase__=100, lowerCAmelCase__=5, lowerCAmelCase__=1, lowerCAmelCase__=1, lowerCAmelCase__=249, lowerCAmelCase__=6, lowerCAmelCase__=17, lowerCAmelCase__=25, lowerCAmelCase__=4, lowerCAmelCase__=4, lowerCAmelCase__=128, lowerCAmelCase__=0.1, lowerCAmelCase__=0.1, lowerCAmelCase__=0.1, lowerCAmelCase__=0.0006, lowerCAmelCase__=512, lowerCAmelCase__=0.02, lowerCAmelCase__=1e-12, lowerCAmelCase__=1, lowerCAmelCase__=True, lowerCAmelCase__=1, lowerCAmelCase__=5_0256, lowerCAmelCase__=5_0256, **lowerCAmelCase__, ) -> Optional[Any]:
snake_case_ = vocab_size
snake_case_ = action_weight
snake_case_ = reward_weight
snake_case_ = value_weight
snake_case_ = max_position_embeddings
snake_case_ = block_size
snake_case_ = action_dim
snake_case_ = observation_dim
snake_case_ = transition_dim
snake_case_ = learning_rate
snake_case_ = n_layer
snake_case_ = n_head
snake_case_ = n_embd
snake_case_ = embd_pdrop
snake_case_ = attn_pdrop
snake_case_ = resid_pdrop
snake_case_ = initializer_range
snake_case_ = layer_norm_eps
snake_case_ = kaiming_initializer_range
snake_case_ = use_cache
super().__init__(pad_token_id=lowerCAmelCase__, bos_token_id=lowerCAmelCase__, eos_token_id=lowerCAmelCase__, **lowerCAmelCase__)
| 312
| 0
|
import functools
from typing import Any
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> bool:
# Validation
if not isinstance(UpperCAmelCase , UpperCAmelCase ) or len(UpperCAmelCase ) == 0:
raise ValueError('the string should be not empty string' )
if not isinstance(UpperCAmelCase , UpperCAmelCase ) or not all(
isinstance(UpperCAmelCase , UpperCAmelCase ) and len(UpperCAmelCase ) > 0 for item in words ):
raise ValueError('the words should be a list of non-empty strings' )
# Build trie
snake_case_ = {}
snake_case_ = 'WORD_KEEPER'
for word in words:
snake_case_ = trie
for c in word:
if c not in trie_node:
snake_case_ = {}
snake_case_ = trie_node[c]
snake_case_ = True
snake_case_ = len(UpperCAmelCase )
# Dynamic programming method
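    # is_breakable(index) answers whether string[index:] can be segmented into dictionary words; functools.cache memoizes each suffix so it is solved at most once.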
@functools.cache
def is_breakable(UpperCAmelCase ) -> bool:
if index == len_string:
return True
snake_case_ = trie
for i in range(UpperCAmelCase , UpperCAmelCase ):
snake_case_ = trie_node.get(string[i] , UpperCAmelCase )
if trie_node is None:
return False
if trie_node.get(UpperCAmelCase , UpperCAmelCase ) and is_breakable(i + 1 ):
return True
return False
return is_breakable(0 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 353
|
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class UpperCamelCase ( metaclass=lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE_ = ["keras_nlp"]
def __init__( self, *lowerCAmelCase__, **lowerCAmelCase__) -> int:
requires_backends(self, ['keras_nlp'])
| 312
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ..utils import _LazyModule
__UpperCamelCase = {
'''config''': [
'''EXTERNAL_DATA_FORMAT_SIZE_LIMIT''',
'''OnnxConfig''',
'''OnnxConfigWithPast''',
'''OnnxSeq2SeqConfigWithPast''',
'''PatchingSpec''',
],
'''convert''': ['''export''', '''validate_model_outputs'''],
'''features''': ['''FeaturesManager'''],
'''utils''': ['''ParameterFormat''', '''compute_serialized_parameters_size'''],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
OnnxSeqaSeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
__UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 354
|
"""simple docstring"""
import os
import numpy
import onnx
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> List[str]:
snake_case_ = a.name
snake_case_ = b.name
snake_case_ = ''
snake_case_ = ''
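    # With the names blanked, `a == b` compares only the tensor payloads; the original names are restored afterwards.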
snake_case_ = a == b
snake_case_ = name_a
snake_case_ = name_b
return res
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> int:
for i, input_name in enumerate(node_proto.input ):
if input_name == name:
node_proto.input.insert(UpperCAmelCase , UpperCAmelCase )
node_proto.input.pop(i + 1 )
if node_proto.op_type == "If":
_graph_replace_input_with(node_proto.attribute[0].g , UpperCAmelCase , UpperCAmelCase )
_graph_replace_input_with(node_proto.attribute[1].g , UpperCAmelCase , UpperCAmelCase )
if node_proto.op_type == "Loop":
_graph_replace_input_with(node_proto.attribute[0].g , UpperCAmelCase , UpperCAmelCase )
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Union[str, Any]:
for n in graph_proto.node:
_node_replace_input_with(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Any:
snake_case_ = list(model.graph.initializer )
snake_case_ = list(model_without_ext.graph.initializer )
for i, ref_i in ind_to_replace:
assert inits_with_data[i].name == inits[i].name
assert inits_with_data[ref_i].name == inits[ref_i].name
assert i > ref_i
snake_case_ = inits[i].name
snake_case_ = inits[ref_i].name
model_without_ext.graph.initializer.remove(inits[i] )
# for n in model.graph.node:
_graph_replace_input_with(model_without_ext.graph , UpperCAmelCase , UpperCAmelCase )
def UpperCAmelCase ( UpperCAmelCase ) -> Optional[Any]:
snake_case_ = os.path.dirname(UpperCAmelCase )
snake_case_ = os.path.basename(UpperCAmelCase )
snake_case_ = onnx.load(os.path.join(UpperCAmelCase , UpperCAmelCase ) )
snake_case_ = list(model.graph.initializer )
snake_case_ = set()
snake_case_ = {}
snake_case_ = []
snake_case_ = 0
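    # Compare every pair of initializers; when two tensors are identical, record the duplicate and estimate the saved memory from the ONNX dtype (1=FLOAT and 6=INT32 use 4 bytes per element, 7=INT64 and 11=DOUBLE use 8).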
for i in range(len(UpperCAmelCase ) ):
if i in dup_set:
continue
for j in range(i + 1 , len(UpperCAmelCase ) ):
if j in dup_set:
continue
if _is_equal_tensor_proto(inits[i] , inits[j] ):
dup_set.add(UpperCAmelCase )
dup_set.add(UpperCAmelCase )
snake_case_ = inits[j].data_type
snake_case_ = numpy.prod(inits[j].dims )
if dtype == 1:
mem_size *= 4
elif dtype == 6:
mem_size *= 4
elif dtype == 7 or dtype == 11:
mem_size *= 8
else:
print('unexpected data type: ' , UpperCAmelCase )
total_reduced_size += mem_size
snake_case_ = inits[i].name
snake_case_ = inits[j].name
if name_i in dup_map:
dup_map[name_i].append(UpperCAmelCase )
else:
snake_case_ = [name_j]
ind_to_replace.append((j, i) )
print('total reduced size: ' , total_reduced_size / 1024 / 1024 / 1024 , 'GB' )
snake_case_ = sorted(UpperCAmelCase )
_remove_dup_initializers_from_model(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
snake_case_ = 'optimized_' + model_file_name
snake_case_ = os.path.join(UpperCAmelCase , UpperCAmelCase )
onnx.save(UpperCAmelCase , UpperCAmelCase )
return new_model
| 312
| 0
|
"""simple docstring"""
import fire
from transformers import AutoConfig, AutoModelForSeqaSeqLM, AutoTokenizer
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ) -> Any:
snake_case_ = AutoConfig.from_pretrained(UpperCAmelCase , **UpperCAmelCase )
snake_case_ = AutoModelForSeqaSeqLM.from_config(UpperCAmelCase )
model.save_pretrained(UpperCAmelCase )
AutoTokenizer.from_pretrained(UpperCAmelCase ).save_pretrained(UpperCAmelCase )
return model
if __name__ == "__main__":
fire.Fire(save_randomly_initialized_version)
| 355
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_barthez import BarthezTokenizer
else:
__UpperCamelCase = None
__UpperCamelCase = logging.get_logger(__name__)
__UpperCamelCase = {'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''}
__UpperCamelCase = {
'''vocab_file''': {
'''moussaKam/mbarthez''': '''https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model''',
'''moussaKam/barthez''': '''https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model''',
'''moussaKam/barthez-orangesum-title''': (
'''https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model'''
),
},
'''tokenizer_file''': {
'''moussaKam/mbarthez''': '''https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json''',
'''moussaKam/barthez''': '''https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json''',
'''moussaKam/barthez-orangesum-title''': (
'''https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json'''
),
},
}
__UpperCamelCase = {
'''moussaKam/mbarthez''': 1024,
'''moussaKam/barthez''': 1024,
'''moussaKam/barthez-orangesum-title''': 1024,
}
__UpperCamelCase = '''▁'''
class UpperCamelCase ( lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE_ = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE_ = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE_ = ["input_ids", "attention_mask"]
SCREAMING_SNAKE_CASE_ = BarthezTokenizer
def __init__( self, lowerCAmelCase__=None, lowerCAmelCase__=None, lowerCAmelCase__="<s>", lowerCAmelCase__="</s>", lowerCAmelCase__="</s>", lowerCAmelCase__="<s>", lowerCAmelCase__="<unk>", lowerCAmelCase__="<pad>", lowerCAmelCase__="<mask>", **lowerCAmelCase__, ) -> List[str]:
        # Mask token behaves like a normal word, i.e. includes the space before it
snake_case_ = AddedToken(lowerCAmelCase__, lstrip=lowerCAmelCase__, rstrip=lowerCAmelCase__) if isinstance(lowerCAmelCase__, lowerCAmelCase__) else mask_token
super().__init__(
lowerCAmelCase__, tokenizer_file=lowerCAmelCase__, bos_token=lowerCAmelCase__, eos_token=lowerCAmelCase__, unk_token=lowerCAmelCase__, sep_token=lowerCAmelCase__, cls_token=lowerCAmelCase__, pad_token=lowerCAmelCase__, mask_token=lowerCAmelCase__, **lowerCAmelCase__, )
snake_case_ = vocab_file
snake_case_ = False if not self.vocab_file else True
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
snake_case_ = [self.cls_token_id]
snake_case_ = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None) -> List[int]:
snake_case_ = [self.sep_token_id]
snake_case_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None) -> Tuple[str]:
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.')
if not os.path.isdir(lowerCAmelCase__):
logger.error(f'Vocabulary path ({save_directory}) should be a directory')
return
snake_case_ = os.path.join(
lowerCAmelCase__, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
if os.path.abspath(self.vocab_file) != os.path.abspath(lowerCAmelCase__):
copyfile(self.vocab_file, lowerCAmelCase__)
return (out_vocab_file,)
| 312
| 0
|
"""simple docstring"""
import numpy as np
def UpperCAmelCase ( UpperCAmelCase ) -> np.ndarray:
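    # Element-wise logistic sigmoid: 1 / (1 + exp(-x)).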
return 1 / (1 + np.exp(-vector ))
def UpperCAmelCase ( UpperCAmelCase ) -> np.ndarray:
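    # Swish (SiLU) activation: f(x) = x * sigmoid(x).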
return vector * sigmoid(UpperCAmelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
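A quick numeric check of the two activations above (the input array is illustrative, not from the original tests):

x = np.array([-1.0, 0.0, 1.0])
print(sigmoid(x))              # approx. [0.2689, 0.5, 0.7311]
print(sigmoid_linear_unit(x))  # approx. [-0.2689, 0.0, 0.7311]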
"""simple docstring"""
import functools
def minimum_tickets_cost(days: list, costs: list) -> int:
    # Validation
    if not isinstance(days, list) or not all(isinstance(day, int) for day in days):
        raise ValueError("The parameter days should be a list of integers")
    if len(costs) != 3 or not all(isinstance(cost, int) for cost in costs):
        raise ValueError("The parameter costs should be a list of three integers")
    if len(days) == 0:
        return 0
    if min(days) <= 0:
        raise ValueError("All days elements should be greater than 0")
    if max(days) >= 366:
        raise ValueError("All days elements should be less than 366")
    days_set = set(days)

    @functools.cache
    def dynamic_programming(index: int) -> int:
        if index > 365:
            return 0
        if index not in days_set:
            return dynamic_programming(index + 1)
        return min(
            costs[0] + dynamic_programming(index + 1),
            costs[1] + dynamic_programming(index + 7),
            costs[2] + dynamic_programming(index + 30),
        )

    return dynamic_programming(1)
if __name__ == "__main__":
import doctest
doctest.testmod()
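Two worked examples for the ticket-cost solver above, using the classic LeetCode 983 cases:

# A 7-day pass bought on day 1 (7) plus single tickets on days 8 and 20 (2 + 2) -> 11
print(minimum_tickets_cost([1, 4, 6, 7, 8, 20], [2, 7, 15]))  # 11
# A 30-day pass covering days 1-30 (15) plus a single ticket on day 31 (2) -> 17
print(minimum_tickets_cost([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 30, 31], [2, 7, 15]))  # 17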
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
logger = logging.get_logger(__name__)

CODEGEN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''Salesforce/codegen-350M-nl''': '''https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json''',
'''Salesforce/codegen-350M-multi''': '''https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json''',
'''Salesforce/codegen-350M-mono''': '''https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json''',
'''Salesforce/codegen-2B-nl''': '''https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json''',
'''Salesforce/codegen-2B-multi''': '''https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json''',
'''Salesforce/codegen-2B-mono''': '''https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json''',
'''Salesforce/codegen-6B-nl''': '''https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json''',
'''Salesforce/codegen-6B-multi''': '''https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json''',
'''Salesforce/codegen-6B-mono''': '''https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json''',
'''Salesforce/codegen-16B-nl''': '''https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json''',
'''Salesforce/codegen-16B-multi''': '''https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json''',
'''Salesforce/codegen-16B-mono''': '''https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json''',
}
class CodeGenConfig(PretrainedConfig):
    model_type = "codegen"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50400,
        n_positions=2048,
        n_ctx=2048,
        n_embd=4096,
        n_layer=28,
        n_head=16,
        rotary_dim=64,
        n_inner=None,
        activation_function="gelu_new",
        resid_pdrop=0.0,
        embd_pdrop=0.0,
        attn_pdrop=0.0,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        tie_word_embeddings=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_ctx = n_ctx
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs
        )
class CodeGenOnnxConfig(OnnxConfigWithPast):
    def __init__(
        self,
        config: PretrainedConfig,
        task: str = "default",
        patching_specs: List[PatchingSpec] = None,
        use_past: bool = False,
    ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}
        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )
        # We need to order the inputs in the way they appear in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})
        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]
        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
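A sketch of how the ONNX config above is typically driven. The tiny config values are illustrative only, and this assumes TensorType is importable from transformers and that torch is installed (required for the past_key_values dummies):

from transformers import AutoTokenizer, TensorType

config = CodeGenConfig(n_layer=2, n_head=4, n_embd=64)  # tiny, for illustration only
onnx_config = CodeGenOnnxConfig(config, task="default", use_past=True)
tokenizer = AutoTokenizer.from_pretrained("Salesforce/codegen-350M-mono")
dummy_inputs = onnx_config.generate_dummy_inputs(
    tokenizer, batch_size=1, seq_length=8, framework=TensorType.PYTORCH
)
print(sorted(dummy_inputs))  # ['attention_mask', 'input_ids', 'past_key_values']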
"""simple docstring"""
import copy
import re
class TrialShortNamer:
    PREFIX = "hp"
    DEFAULTS = {}
    NAMING_INFO = None

    @classmethod
    def set_defaults(cls, prefix, defaults):
        cls.PREFIX = prefix
        cls.DEFAULTS = defaults
        cls.build_naming_info()

    @staticmethod
    def shortname_for_word(info, word):
        if len(word) == 0:
            return ""
        short_word = None
        if any(char.isdigit() for char in word):
            raise Exception(f"Parameters should not contain numbers: '{word}' contains a number")
        if word in info["short_word"]:
            return info["short_word"][word]
        for prefix_len in range(1, len(word) + 1):
            prefix = word[:prefix_len]
            if prefix in info["reverse_short_word"]:
                continue
            else:
                short_word = prefix
                break

        if short_word is None:
            # Paranoid fallback
            def int_to_alphabetic(integer):
                s = ""
                while integer != 0:
                    s = chr(ord("A") + integer % 10) + s
                    integer //= 10
                return s

            i = 0
            while True:
                sword = word + "#" + int_to_alphabetic(i)
                if sword in info["reverse_short_word"]:
                    continue
                else:
                    short_word = sword
                    break

        info["short_word"][word] = short_word
        info["reverse_short_word"][short_word] = word
        return short_word

    @staticmethod
    def shortname_for_key(info, param_name):
        words = param_name.split("_")
        shortname_parts = [TrialShortNamer.shortname_for_word(info, word) for word in words]
        # We try to create a separatorless short name, but if there is a collision we have to fallback
        # to a separated short name
        separators = ["", "_"]
        for separator in separators:
            shortname = separator.join(shortname_parts)
            if shortname not in info["reverse_short_param"]:
                info["short_param"][param_name] = shortname
                info["reverse_short_param"][shortname] = param_name
                return shortname
        return param_name

    @staticmethod
    def add_new_param_name(info, param_name):
        short_name = TrialShortNamer.shortname_for_key(info, param_name)
        info["short_param"][param_name] = short_name
        info["reverse_short_param"][short_name] = param_name

    @classmethod
    def build_naming_info(cls):
        if cls.NAMING_INFO is not None:
            return
        info = {
            "short_word": {},
            "reverse_short_word": {},
            "short_param": {},
            "reverse_short_param": {},
        }
        field_keys = list(cls.DEFAULTS.keys())
        for k in field_keys:
            cls.add_new_param_name(info, k)
        cls.NAMING_INFO = info

    @classmethod
    def shortname(cls, params):
        cls.build_naming_info()
        assert cls.PREFIX is not None
        name = [copy.copy(cls.PREFIX)]
        for k, v in params.items():
            if k not in cls.DEFAULTS:
                raise Exception(f"You should provide a default value for the param name {k} with value {v}")
            if v == cls.DEFAULTS[k]:
                # The default value is not added to the name
                continue
            key = cls.NAMING_INFO["short_param"][k]
            if isinstance(v, bool):
                v = 1 if v else 0
            sep = "" if isinstance(v, (int, float)) else "-"
            name.append(f"{key}{sep}{v}")
        return "_".join(name)

    @classmethod
    def parse_repr(cls, repr):
        repr = repr[len(cls.PREFIX) + 1 :]
        if repr == "":
            values = []
        else:
            values = repr.split("_")
        parameters = {}
        for value in values:
            if "-" in value:
                p_k, p_v = value.split("-")
            else:
                p_k = re.sub("[0-9.]", "", value)
                p_v = float(re.sub("[^0-9.]", "", value))
            key = cls.NAMING_INFO["reverse_short_param"][p_k]
            parameters[key] = p_v
        for k in cls.DEFAULTS:
            if k not in parameters:
                parameters[k] = cls.DEFAULTS[k]
        return parameters
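A round-trip sketch for the namer above; the subclass name and its defaults are made up for illustration:

class RunNamer(TrialShortNamer):
    PREFIX = "run"
    DEFAULTS = {"learning_rate": 3e-05, "batch_size": 32, "warmup": 0}

name = RunNamer.shortname({"learning_rate": 0.0001, "batch_size": 32, "warmup": 100})
print(name)  # params at their default are omitted, e.g. "run_lr0.0001_w100"
print(RunNamer.parse_repr(name))
# {'learning_rate': 0.0001, 'warmup': 100.0, 'batch_size': 32}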
"""simple docstring"""
import argparse
import shutil
import time
from json import JSONDecodeError
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from utils import (
    Seq2SeqDataset,
    calculate_bleu,
    calculate_rouge,
    chunks,
    lmap,
    load_json,
    parse_numeric_n_bool_cl_kwargs,
    save_json,
    use_task_specific_params,
    write_txt_file,
)
logger = getLogger(__name__)


def eval_data_dir(
    data_dir,
    save_dir: str,
    model_name: str,
    bs: int = 8,
    max_source_length: int = 1024,
    type_path="val",
    n_obs=None,
    fp16=False,
    task="summarization",
    local_rank=None,
    num_return_sequences=1,
    dataset_kwargs: Dict = None,
    prefix="",
    **generate_kwargs,
) -> Dict:
    model_name = str(model_name)
    assert local_rank is not None
    torch.distributed.init_process_group(backend="nccl", rank=local_rank)

    save_dir = Path(save_dir)
    save_path = save_dir.joinpath(f"rank_{local_rank}_output.json")
    torch.cuda.set_device(local_rank)
    model = AutoModelForSeq2SeqLM.from_pretrained(model_name).cuda()
    if fp16:
        model = model.half()
    # determine if we need to increase num_beams
    use_task_specific_params(model, task)  # update config with task specific params
    num_beams = generate_kwargs.pop("num_beams", model.config.num_beams)  # AttributeError risk?
    if num_return_sequences > num_beams:
        num_beams = num_return_sequences

    tokenizer = AutoTokenizer.from_pretrained(model_name)
    logger.info(f"Inferred tokenizer type: {tokenizer.__class__}")  # if this is wrong, check config.model_type.

    if max_source_length is None:
        max_source_length = tokenizer.model_max_length
    if prefix is None:
        prefix = prefix or getattr(model.config, "prefix", "") or ""
    ds = Seq2SeqDataset(
        tokenizer,
        data_dir,
        max_source_length,
        max_target_length=1024,
        type_path=type_path,
        n_obs=n_obs,
        prefix=prefix,
        **dataset_kwargs,
    )
    # I set shuffle=True for a more accurate progress bar.
    # If all the longest samples are first, the prog bar estimate is too high at the beginning.
    sampler = ds.make_sortish_sampler(bs, distributed=True, add_extra_examples=False, shuffle=True)
    data_loader = DataLoader(ds, sampler=sampler, batch_size=bs, collate_fn=ds.collate_fn)
    results = []
    for batch in tqdm(data_loader):
        summaries = model.generate(
            input_ids=batch["input_ids"].to(model.device),
            attention_mask=batch["attention_mask"].to(model.device),
            num_return_sequences=num_return_sequences,
            num_beams=num_beams,
            **generate_kwargs,
        )
        preds = tokenizer.batch_decode(summaries, skip_special_tokens=True, clean_up_tokenization_spaces=False)
        ids = batch["ids"]
        if num_return_sequences > 1:
            preds = chunks(preds, num_return_sequences)  # batch size chunks, each of size num_return_seq
        for i, pred in enumerate(preds):
            results.append({"pred": pred, "id": ids[i].item()})
    save_json(results, save_path)
    return results, sampler.num_replicas
def run_generate():
    parser = argparse.ArgumentParser(
        epilog="Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate"
    )
    parser.add_argument("--data_dir", type=str, help="like cnn_dm/test.source")
    parser.add_argument(
        "--model_name",
        type=str,
        help="like facebook/bart-large-cnn,t5-base, etc.",
        default="sshleifer/distilbart-xsum-12-3",
    )
    parser.add_argument("--save_dir", type=str, help="where to save", default="tmp_gen")
    parser.add_argument("--max_source_length", type=int, default=None)
    parser.add_argument(
        "--type_path", type=str, default="test", help="which subset to evaluate typically train/val/test"
    )
    parser.add_argument("--task", type=str, default="summarization", help="used for task_specific_params + metrics")
    parser.add_argument("--bs", type=int, default=8, required=False, help="batch size")
    parser.add_argument(
        "--local_rank", type=int, default=-1, required=False, help="should be passed by distributed.launch"
    )
    parser.add_argument(
        "--n_obs", type=int, default=None, required=False, help="How many observations. Defaults to all."
    )
    parser.add_argument(
        "--num_return_sequences", type=int, default=1, required=False, help="How many sequences to return"
    )
    parser.add_argument(
        "--sync_timeout",
        type=int,
        default=600,
        required=False,
        help="How long should master process wait for other processes to finish.",
    )
    parser.add_argument("--src_lang", type=str, default=None, required=False)
    parser.add_argument("--tgt_lang", type=str, default=None, required=False)
    parser.add_argument(
        "--prefix", type=str, required=False, default=None, help="will be added to the beginning of src examples"
    )
    parser.add_argument("--fp16", action="store_true")
    parser.add_argument("--debug", action="store_true")
    start_time = time.time()
    args, rest = parser.parse_known_args()
    generate_kwargs = parse_numeric_n_bool_cl_kwargs(rest)
    if generate_kwargs and args.local_rank <= 0:
        print(f"parsed the following generate kwargs: {generate_kwargs}")
    json_save_dir = Path(args.save_dir + "_tmp")
    Path(json_save_dir).mkdir(exist_ok=True)  # this handles locking.
    intermediate_files = list(json_save_dir.glob("rank_*.json"))
    if intermediate_files:
        raise ValueError(f"Found files at {json_save_dir} please move or remove them.")
    # In theory, a node could finish and save before another node hits this. If this happens, we can address later.
    dataset_kwargs = {}
    if args.src_lang is not None:
        dataset_kwargs["src_lang"] = args.src_lang
    if args.tgt_lang is not None:
        dataset_kwargs["tgt_lang"] = args.tgt_lang
    Path(args.save_dir).mkdir(exist_ok=True)
    results, num_replicas = eval_data_dir(
        args.data_dir,
        json_save_dir,
        args.model_name,
        type_path=args.type_path,
        bs=args.bs,
        fp16=args.fp16,
        task=args.task,
        local_rank=args.local_rank,
        n_obs=args.n_obs,
        max_source_length=args.max_source_length,
        num_return_sequences=args.num_return_sequences,
        prefix=args.prefix,
        dataset_kwargs=dataset_kwargs,
        **generate_kwargs,
    )
    if args.local_rank <= 0:
        save_dir = Path(args.save_dir)
        save_dir.mkdir(exist_ok=True)
        partial_results = gather_results_from_each_node(num_replicas, json_save_dir, args.sync_timeout)
        preds = combine_partial_results(partial_results)
        if args.num_return_sequences > 1:
            pseudolabel_results_path = save_dir.joinpath("pseudolabel_results.json")
            print(f"Saving aggregated results at {pseudolabel_results_path}, intermediate in {json_save_dir}/")
            save_json(results, pseudolabel_results_path)
            return
        tgt_file = Path(args.data_dir).joinpath(args.type_path + ".target")
        with open(tgt_file) as f:
            labels = [x.rstrip() for x in f.readlines()][: len(preds)]
        # Calculate metrics, save metrics, and save _generations.txt
        calc_bleu = "translation" in args.task
        score_fn = calculate_bleu if calc_bleu else calculate_rouge
        metric_name = "bleu" if calc_bleu else "rouge"
        metrics = score_fn(preds, labels)
        metrics["n_obs"] = len(preds)
        runtime = time.time() - start_time
        metrics["seconds_per_sample"] = round(runtime / metrics["n_obs"], 4)
        metrics["n_gpus"] = num_replicas
        # TODO(@stas00): add whatever metadata to metrics
        metrics_save_path = save_dir.joinpath(f"{args.type_path}_{metric_name}.json")
        save_json(metrics, metrics_save_path, indent=None)
        print(metrics)
        write_txt_file(preds, save_dir.joinpath(f"{args.type_path}_generations.txt"))
        if args.debug:
            write_txt_file(labels, save_dir.joinpath(f"{args.type_path}.target"))
        else:
            shutil.rmtree(json_save_dir)
def combine_partial_results(partial_results) -> List:
    records = []
    for partial_result in partial_results:
        records.extend(partial_result)
    records = sorted(records, key=lambda x: x["id"])
    preds = [x["pred"] for x in records]
    return preds
def gather_results_from_each_node(num_replicas, save_dir, timeout) -> List[Dict[str, List]]:
    # WAIT FOR lots of .json files
    start_wait = time.time()
    logger.info("waiting for all nodes to finish")
    json_data = None
    while (time.time() - start_wait) < timeout:
        json_files = list(save_dir.glob("rank_*.json"))
        if len(json_files) < num_replicas:
            continue
        try:
            # make sure all json files are fully saved
            json_data = lmap(load_json, json_files)
            return json_data
        except JSONDecodeError:
            continue
    else:
        raise TimeoutError("Rank 0 gave up on waiting for other processes")
# Unreachable
if __name__ == "__main__":
# Usage for MT:
run_generate()
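For reference, a script like this is usually launched through torch.distributed; a hedged example invocation, where the file name, dataset path and GPU count are placeholders:

# python -m torch.distributed.launch --nproc_per_node=2 run_distributed_eval.py \
#     --model_name sshleifer/distilbart-xsum-12-3 \
#     --data_dir xsum --save_dir tmp_gen --bs 16 --fp16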
"""simple docstring"""
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
@pytest.mark.parametrize(
'files' , [
['full:README.md', 'dataset_infos.json'],
['empty:README.md', 'dataset_infos.json'],
['dataset_infos.json'],
['full:README.md'],
] , )
def test_from_dir(files, tmp_path_factory):
    dataset_infos_dir = tmp_path_factory.mktemp("dset_infos_dir")
    if "full:README.md" in files:
        with open(dataset_infos_dir / "README.md", "w") as f:
            f.write("---\ndataset_info:\n  dataset_size: 42\n---")
    if "empty:README.md" in files:
        with open(dataset_infos_dir / "README.md", "w") as f:
            f.write("")
    # we want to support dataset_infos.json for backward compatibility
    if "dataset_infos.json" in files:
        with open(dataset_infos_dir / "dataset_infos.json", "w") as f:
            f.write('{"default": {"dataset_size": 42}}')
    dataset_infos = DatasetInfosDict.from_directory(dataset_infos_dir)
    assert dataset_infos
    assert dataset_infos["default"].dataset_size == 42
@pytest.mark.parametrize(
'dataset_info' , [
DatasetInfo(),
DatasetInfo(
description='foo' , features=Features({'a': Value('int32' )} ) , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train'}] , download_size=42 , ),
] , )
def test_dataset_info_dump_and_reload(tmp_path, dataset_info: DatasetInfo):
    tmp_path = str(tmp_path)
    dataset_info.write_to_directory(tmp_path)
    reloaded = DatasetInfo.from_directory(tmp_path)
    assert dataset_info == reloaded
    assert os.path.exists(os.path.join(tmp_path, "dataset_info.json"))
def test_dataset_info_to_yaml_dict():
    dataset_info = DatasetInfo(
        description="foo",
        citation="bar",
        homepage="https://foo.bar",
        license="CC0",
        features=Features({"a": Value("int32")}),
        post_processed={},
        supervised_keys=(),
        task_templates=[],
        builder_name="builder",
        config_name="config",
        version="1.0.0",
        splits=[{"name": "train", "num_examples": 42}],
        download_checksums={},
        download_size=1337,
        post_processing_size=442,
        dataset_size=1234,
        size_in_bytes=1337 + 442 + 1234,
    )
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert sorted(dataset_info_yaml_dict) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML)
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        assert key in dataset_info_yaml_dict
        assert isinstance(dataset_info_yaml_dict[key], (list, dict, int, str))
    dataset_info_yaml = yaml.safe_dump(dataset_info_yaml_dict)
    reloaded = yaml.safe_load(dataset_info_yaml)
    assert dataset_info_yaml_dict == reloaded
def test_dataset_info_to_yaml_dict_empty():
    dataset_info = DatasetInfo()
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert dataset_info_yaml_dict == {}
@pytest.mark.parametrize(
'dataset_infos_dict' , [
DatasetInfosDict(),
DatasetInfosDict({'default': DatasetInfo()} ),
DatasetInfosDict({'my_config_name': DatasetInfo()} ),
DatasetInfosDict(
{
'default': DatasetInfo(
description='foo' , features=Features({'a': Value('int32' )} ) , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train'}] , download_size=42 , )
} ),
DatasetInfosDict(
{
'v1': DatasetInfo(dataset_size=42 ),
'v2': DatasetInfo(dataset_size=1337 ),
} ),
] , )
def test_dataset_infos_dict_dump_and_reload(tmp_path, dataset_infos_dict: DatasetInfosDict):
    tmp_path = str(tmp_path)
    dataset_infos_dict.write_to_directory(tmp_path)
    reloaded = DatasetInfosDict.from_directory(tmp_path)
    # the config_name of the dataset_infos_dict takes over the attribute
    for config_name, dataset_info in dataset_infos_dict.items():
        dataset_info.config_name = config_name
        # the yaml representation doesn't include fields like description or citation
        # so we just test that we can recover what we can from the yaml
        dataset_infos_dict[config_name] = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict())
    assert dataset_infos_dict == reloaded
    if dataset_infos_dict:
        assert os.path.exists(os.path.join(tmp_path, "README.md"))
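The same dump-and-reload round trip can be exercised outside pytest; a minimal sketch using a temporary directory:

import tempfile

with tempfile.TemporaryDirectory() as tmpdir:
    info = DatasetInfo(description="foo", dataset_size=42)
    info.write_to_directory(tmpdir)
    assert DatasetInfo.from_directory(tmpdir) == info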
"""simple docstring"""
from math import pi
def arc_length(angle: int, radius: int) -> float:
    return 2 * pi * radius * (angle / 360)
if __name__ == "__main__":
print(arc_length(90, 10))
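Sanity check: a 90-degree arc of a circle with radius 10 is a quarter of the full circumference, 2 * pi * 10 / 4, which is about 15.708:

print(arc_length(90, 10))   # 15.707963267948966
print(arc_length(360, 10))  # full circumference: 62.83185307179586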
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionInpaintPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionInpaintPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=9,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
            attention_head_dim=(2, 4),
            use_linear_projection=True,
        )
        scheduler = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        # TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
        mask_image = Image.fromarray(np.uint8(image + 4)).convert("RGB").resize((64, 64))
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": init_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs
    def test_stable_diffusion_inpaint(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInpaintPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class StableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_stable_diffusion_inpaint_pipeline(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
            "/yellow_cat_sitting_on_a_park_bench.npy"
        )
        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pipe = StableDiffusionInpaintPipeline.from_pretrained(model_id, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 9e-3
    def test_stable_diffusion_inpaint_pipeline_fp16(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
            "/yellow_cat_sitting_on_a_park_bench_fp16.npy"
        )
        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id,
            torch_dtype=torch.float16,
            safety_checker=None,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 5e-1
    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )
        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pndm = PNDMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id,
            safety_checker=None,
            scheduler=pndm,
            torch_dtype=torch.float16,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()
        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"
        generator = torch.manual_seed(0)
        _ = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            generator=generator,
            num_inference_steps=2,
            output_type="np",
        )
        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.65 GB is allocated
        assert mem_bytes < 2.65 * 10**9
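Outside the test harness, the pipeline exercised above is typically driven as follows; a minimal sketch, assuming a CUDA device and network access to the checkpoint:

import torch
from diffusers import StableDiffusionInpaintPipeline
from diffusers.utils import load_image

pipe = StableDiffusionInpaintPipeline.from_pretrained(
    "stabilityai/stable-diffusion-2-inpainting", torch_dtype=torch.float16
).to("cuda")
init_image = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/init_image.png"
)
mask_image = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
)
image = pipe(
    prompt="Face of a yellow cat, high resolution, sitting on a park bench",
    image=init_image,
    mask_image=mask_image,
).images[0]
image.save("inpainted.png")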
"""simple docstring"""
speed_chart = {
    "km/h": 1.0,
    "m/s": 3.6,
    "mph": 1.609344,
    "knot": 1.852,
}

speed_chart_inverse = {
    "km/h": 1.0,
    "m/s": 0.277777778,
    "mph": 0.621371192,
    "knot": 0.539956803,
}
def convert_speed(speed: float, unit_from: str, unit_to: str) -> float:
    if unit_to not in speed_chart or unit_from not in speed_chart_inverse:
        msg = (
            f"Incorrect 'from_type' or 'to_type' value: {unit_from!r}, {unit_to!r}\n"
            f"Valid values are: {', '.join(speed_chart_inverse)}"
        )
        raise ValueError(msg)
    return round(speed * speed_chart[unit_from] * speed_chart_inverse[unit_to], 3)
if __name__ == "__main__":
import doctest
doctest.testmod()
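A couple of spot checks for the converter above:

print(convert_speed(100, "km/h", "m/s"))  # 27.778
print(convert_speed(60, "mph", "km/h"))   # 96.561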
"""simple docstring"""
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)


def simple_accuracy(preds, labels):
    return (preds == labels).mean()
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
@dataclass
class DataTrainingArguments:
    task_name: str = field(metadata={"help": "The name of the task to train on: " + ", ".join(processors.keys())})
    data_dir: str = field(metadata={"help": "Should contain the data files for the task."})
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f'Output directory ({training_args.output_dir}) already exists and is not empty. Use'
' --overwrite_output_dir to overcome.' )
# Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.local_rank != -1),
        training_args.fp16,
    )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)
# Set seed
set_seed(training_args.seed )
    try:
        processor = processors[data_args.task_name]()
        label_list = processor.get_labels()
        num_labels = len(label_list)
    except KeyError:
        raise ValueError("Task not found: %s" % (data_args.task_name))
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        finetuning_task=data_args.task_name,
        cache_dir=model_args.cache_dir,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
    )
# Get datasets
    train_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            task=data_args.task_name,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.train,
        )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            task=data_args.task_name,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.dev,
        )
        if training_args.do_eval
        else None
    )

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": simple_accuracy(preds, p.label_ids)}

    # Data collator
    data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) if training_args.fp16 else None

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
        data_collator=data_collator,
    )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")
        if trainer.is_world_master():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in result.items():
                    logger.info("  %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))
            results.update(result)

    return results
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
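A hedged example invocation of the training script above; the task name, paths and hyperparameters are placeholders, not values from the original:

# python run_multiple_choice.py \
#     --task_name swag \
#     --model_name_or_path bert-base-uncased \
#     --data_dir ./swag_data \
#     --output_dir ./swag_output \
#     --max_seq_length 80 \
#     --do_train --do_eval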