"""Generate random graphs, given a number of vertices and an edge probability."""
import random


def random_graph(vertices_number: int, probability: float, directed: bool = False) -> dict:
    graph: dict = {i: [] for i in range(vertices_number)}

    # if probability is greater or equal than 1, then generate a complete graph
    if probability >= 1:
        return complete_graph(vertices_number)
    # if probability is lower or equal than 0, then return a graph without edges
    if probability <= 0:
        return graph

    # for each couple of nodes, add an edge from i to j
    # if the randomly generated number is lower than the given probability
    for i in range(vertices_number):
        for j in range(i + 1, vertices_number):
            if random.random() < probability:
                graph[i].append(j)
                if not directed:
                    # if the graph is undirected, add an edge from j to i too
                    graph[j].append(i)
    return graph


def complete_graph(vertices_number: int) -> dict:
    return {
        i: [j for j in range(vertices_number) if i != j] for i in range(vertices_number)
    }


if __name__ == "__main__":
    import doctest

    doctest.testmod()
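# Example (a sketch, not part of the original module): with probability >= 1 the
# generator falls through to `complete_graph`, so both calls below agree.
#
#     >>> random_graph(3, 1)
#     {0: [1, 2], 1: [0, 2], 2: [0, 1]}
#     >>> complete_graph(3)
#     {0: [1, 2], 1: [0, 2], 2: [0, 1]}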
import unittest

import numpy as np
import torch

from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device


torch.backends.cuda.matmul.allow_tf32 = False


class VersatileDiffusionImageVariationPipelineFastTests(unittest.TestCase):
    pass


@slow
@require_torch_gpu
class VersatileDiffusionImageVariationPipelineIntegrationTests(unittest.TestCase):
    def test_inference_image_variations(self):
        pipe = VersatileDiffusionImageVariationPipeline.from_pretrained("shi-labs/versatile-diffusion")
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        image_prompt = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )
        generator = torch.manual_seed(0)
        image = pipe(
            image=image_prompt,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=50,
            output_type="numpy",
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0441, 0.0469, 0.0507, 0.0575, 0.0632, 0.0650, 0.0865, 0.0909, 0.0945])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
import warnings

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding


class CLIPSegProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, visual_prompt=None, return_tensors=None, **kwargs):
        if text is None and visual_prompt is None and images is None:
            raise ValueError("You have to specify either text, visual prompt or images.")
        if text is not None and visual_prompt is not None:
            raise ValueError("You have to specify exactly one type of prompt. Either text or visual prompt.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)
        if visual_prompt is not None:
            prompt_features = self.image_processor(visual_prompt, return_tensors=return_tensors, **kwargs)
        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if visual_prompt is not None and images is not None:
            encoding = {
                "pixel_values": image_features.pixel_values,
                "conditional_pixel_values": prompt_features.pixel_values,
            }
            return encoding
        elif text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        elif visual_prompt is not None:
            encoding = {
                "conditional_pixel_values": prompt_features.pixel_values,
            }
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
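# Usage sketch (assuming the public `CIDAS/clipseg-rd64-refined` checkpoint and a
# PIL `image` already loaded): one call tokenizes the text and preprocesses the
# image into model-ready tensors.
#
#     from transformers import CLIPSegProcessor
#     processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
#     inputs = processor(text=["a cat"], images=[image], return_tensors="pt")
#     sorted(inputs.keys())   # ['attention_mask', 'input_ids', 'pixel_values']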
"""simple docstring"""
import io
import math
from typing import Dict, Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import convert_to_rgb, normalize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
get_image_size,
infer_channel_dimension_format,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_vision_available, logging
from ...utils.import_utils import requires_backends
if is_vision_available():
import textwrap
from PIL import Image, ImageDraw, ImageFont
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
__UpperCamelCase = False
__UpperCamelCase = logging.get_logger(__name__)
__UpperCamelCase = '''ybelkada/fonts'''
def UpperCAmelCase ( ) -> Dict:
if is_torch_available() and not is_torch_greater_or_equal_than_1_11:
raise ImportError(
f'You are using torch=={torch.__version__}, but torch>=1.11.0 is required to use '
'Pix2StructImageProcessor. Please upgrade torch.' )
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> str:
requires_backends(UpperCAmelCase , ['torch'] )
_check_torch_version()
snake_case_ = image_tensor.unsqueeze(0 )
snake_case_ = torch.nn.functional.unfold(UpperCAmelCase , (patch_height, patch_width) , stride=(patch_height, patch_width) )
snake_case_ = patches.reshape(image_tensor.size(0 ) , image_tensor.size(1 ) , UpperCAmelCase , UpperCAmelCase , -1 )
snake_case_ = patches.permute(0 , 4 , 2 , 3 , 1 ).reshape(
image_tensor.size(2 ) // patch_height , image_tensor.size(3 ) // patch_width , image_tensor.size(1 ) * patch_height * patch_width , )
return patches.unsqueeze(0 )
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase = 36 , UpperCAmelCase = "black" , UpperCAmelCase = "white" , UpperCAmelCase = 5 , UpperCAmelCase = 5 , UpperCAmelCase = 5 , UpperCAmelCase = 5 , UpperCAmelCase = None , UpperCAmelCase = None , ) -> Image.Image:
requires_backends(UpperCAmelCase , 'vision' )
# Add new lines so that each line is no more than 80 characters.
snake_case_ = textwrap.TextWrapper(width=80 )
snake_case_ = wrapper.wrap(text=UpperCAmelCase )
snake_case_ = '\n'.join(UpperCAmelCase )
if font_bytes is not None and font_path is None:
snake_case_ = io.BytesIO(UpperCAmelCase )
elif font_path is not None:
snake_case_ = font_path
else:
snake_case_ = hf_hub_download(UpperCAmelCase , 'Arial.TTF' )
snake_case_ = ImageFont.truetype(UpperCAmelCase , encoding='UTF-8' , size=UpperCAmelCase )
# Use a temporary canvas to determine the width and height in pixels when
# rendering the text.
snake_case_ = ImageDraw.Draw(Image.new('RGB' , (1, 1) , UpperCAmelCase ) )
snake_case_ , snake_case_ , snake_case_ , snake_case_ = temp_draw.textbbox((0, 0) , UpperCAmelCase , UpperCAmelCase )
# Create the actual image with a bit of padding around the text.
snake_case_ = text_width + left_padding + right_padding
snake_case_ = text_height + top_padding + bottom_padding
snake_case_ = Image.new('RGB' , (image_width, image_height) , UpperCAmelCase )
snake_case_ = ImageDraw.Draw(UpperCAmelCase )
draw.text(xy=(left_padding, top_padding) , text=UpperCAmelCase , fill=UpperCAmelCase , font=UpperCAmelCase )
return image
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ) -> Any:
requires_backends(UpperCAmelCase , 'vision' )
# Convert to PIL image if necessary
snake_case_ = to_pil_image(UpperCAmelCase )
snake_case_ = render_text(UpperCAmelCase , **UpperCAmelCase )
snake_case_ = max(header_image.width , image.width )
snake_case_ = int(image.height * (new_width / image.width) )
snake_case_ = int(header_image.height * (new_width / header_image.width) )
snake_case_ = Image.new('RGB' , (new_width, new_height + new_header_height) , 'white' )
new_image.paste(header_image.resize((new_width, new_header_height) ) , (0, 0) )
new_image.paste(image.resize((new_width, new_height) ) , (0, new_header_height) )
# Convert back to the original framework if necessary
snake_case_ = to_numpy_array(UpperCAmelCase )
if infer_channel_dimension_format(UpperCAmelCase ) == ChannelDimension.LAST:
snake_case_ = to_channel_dimension_format(UpperCAmelCase , ChannelDimension.LAST )
return new_image
class UpperCamelCase ( lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE_ = ["flattened_patches"]
def __init__( self, lowerCAmelCase__ = True, lowerCAmelCase__ = True, lowerCAmelCase__ = None, lowerCAmelCase__ = 2048, lowerCAmelCase__ = False, **lowerCAmelCase__, ) -> None:
super().__init__(**lowerCAmelCase__)
snake_case_ = patch_size if patch_size is not None else {'height': 16, 'width': 16}
snake_case_ = do_normalize
snake_case_ = do_convert_rgb
snake_case_ = max_patches
snake_case_ = is_vqa
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, **lowerCAmelCase__) -> np.ndarray:
requires_backends(self.extract_flattened_patches, 'torch')
_check_torch_version()
# convert to torch
snake_case_ = to_channel_dimension_format(lowerCAmelCase__, ChannelDimension.FIRST)
snake_case_ = torch.from_numpy(lowerCAmelCase__)
snake_case_ , snake_case_ = patch_size['height'], patch_size['width']
snake_case_ , snake_case_ = get_image_size(lowerCAmelCase__)
# maximize scale s.t.
snake_case_ = math.sqrt(max_patches * (patch_height / image_height) * (patch_width / image_width))
snake_case_ = max(min(math.floor(scale * image_height / patch_height), lowerCAmelCase__), 1)
snake_case_ = max(min(math.floor(scale * image_width / patch_width), lowerCAmelCase__), 1)
snake_case_ = max(num_feasible_rows * patch_height, 1)
snake_case_ = max(num_feasible_cols * patch_width, 1)
snake_case_ = torch.nn.functional.interpolate(
image.unsqueeze(0), size=(resized_height, resized_width), mode='bilinear', align_corners=lowerCAmelCase__, antialias=lowerCAmelCase__, ).squeeze(0)
# [1, rows, columns, patch_height * patch_width * image_channels]
snake_case_ = torch_extract_patches(lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = patches.shape
snake_case_ = patches_shape[1]
snake_case_ = patches_shape[2]
snake_case_ = patches_shape[3]
# [rows * columns, patch_height * patch_width * image_channels]
snake_case_ = patches.reshape([rows * columns, depth])
# [rows * columns, 1]
snake_case_ = torch.arange(lowerCAmelCase__).reshape([rows, 1]).repeat(1, lowerCAmelCase__).reshape([rows * columns, 1])
snake_case_ = torch.arange(lowerCAmelCase__).reshape([1, columns]).repeat(lowerCAmelCase__, 1).reshape([rows * columns, 1])
# Offset by 1 so the ids do not contain zeros, which represent padding.
row_ids += 1
col_ids += 1
# Prepare additional patch features.
# [rows * columns, 1]
snake_case_ = row_ids.to(torch.floataa)
snake_case_ = col_ids.to(torch.floataa)
# [rows * columns, 2 + patch_height * patch_width * image_channels]
snake_case_ = torch.cat([row_ids, col_ids, patches], -1)
# [max_patches, 2 + patch_height * patch_width * image_channels]
snake_case_ = torch.nn.functional.pad(lowerCAmelCase__, [0, 0, 0, max_patches - (rows * columns)]).float()
snake_case_ = to_numpy_array(lowerCAmelCase__)
return result
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None, **lowerCAmelCase__) -> np.ndarray:
if image.dtype == np.uinta:
snake_case_ = image.astype(np.floataa)
# take mean across the whole `image`
snake_case_ = np.mean(lowerCAmelCase__)
snake_case_ = np.std(lowerCAmelCase__)
snake_case_ = max(lowerCAmelCase__, 1.0 / math.sqrt(np.prod(image.shape)))
return normalize(lowerCAmelCase__, mean=lowerCAmelCase__, std=lowerCAmelCase__, **lowerCAmelCase__)
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__ = ChannelDimension.FIRST, **lowerCAmelCase__, ) -> ImageInput:
snake_case_ = do_normalize if do_normalize is not None else self.do_normalize
snake_case_ = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
snake_case_ = patch_size if patch_size is not None else self.patch_size
snake_case_ = max_patches if max_patches is not None else self.max_patches
snake_case_ = self.is_vqa
if kwargs.get('data_format', lowerCAmelCase__) is not None:
raise ValueError('data_format is not an accepted input as the outputs are ')
snake_case_ = make_list_of_images(lowerCAmelCase__)
if not valid_images(lowerCAmelCase__):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.')
# PIL RGBA images are converted to RGB
if do_convert_rgb:
snake_case_ = [convert_to_rgb(lowerCAmelCase__) for image in images]
# All transformations expect numpy arrays.
snake_case_ = [to_numpy_array(lowerCAmelCase__) for image in images]
if is_vqa:
if header_text is None:
raise ValueError('A header text must be provided for VQA models.')
snake_case_ = kwargs.pop('font_bytes', lowerCAmelCase__)
snake_case_ = kwargs.pop('font_path', lowerCAmelCase__)
if isinstance(lowerCAmelCase__, lowerCAmelCase__):
snake_case_ = [header_text] * len(lowerCAmelCase__)
snake_case_ = [
render_header(lowerCAmelCase__, header_text[i], font_bytes=lowerCAmelCase__, font_path=lowerCAmelCase__)
for i, image in enumerate(lowerCAmelCase__)
]
if do_normalize:
snake_case_ = [self.normalize(image=lowerCAmelCase__) for image in images]
# convert to torch tensor and permute
snake_case_ = [
self.extract_flattened_patches(image=lowerCAmelCase__, max_patches=lowerCAmelCase__, patch_size=lowerCAmelCase__)
for image in images
]
# create attention mask in numpy
snake_case_ = [(image.sum(axis=-1) != 0).astype(np.floataa) for image in images]
snake_case_ = BatchFeature(
data={'flattened_patches': images, 'attention_mask': attention_masks}, tensor_type=lowerCAmelCase__)
return encoded_outputs
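# Usage sketch (a PIL `image` is assumed; numbers hold for the default 16x16 RGB
# patches): each image is resized so its patch grid fits the budget, then
# flattened to [max_patches, 2 + 16 * 16 * 3], the first two columns being the
# 1-based row/column ids.
#
#     processor = Pix2StructImageProcessor(max_patches=1024)
#     batch = processor(images=image, return_tensors="pt")
#     batch["flattened_patches"].shape   # torch.Size([1, 1024, 770])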
"""simple docstring"""
import os
import posixpath
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union
import numpy as np
import pyarrow as pa
import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
is_remote_filesystem,
rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int
UpperCamelCase : Dict = datasets.utils.logging.get_logger(__name__)
if TYPE_CHECKING:
import pyspark
@dataclass
class __lowerCAmelCase ( datasets.BuilderConfig ):
lowercase = None
def A ( snake_case :"pyspark.sql.DataFrame" , snake_case :List[int] , ) -> int:
import pyspark
def generate_fn():
__UpperCamelCase = df.select('*' , pyspark.sql.functions.spark_partition_id().alias('part_id' ) )
for partition_id in partition_order:
__UpperCamelCase = df_with_partition_id.select('*' ).where(f'part_id = {partition_id}' ).drop('part_id' )
__UpperCamelCase = partition_df.collect()
__UpperCamelCase = 0
for row in rows:
yield f'{partition_id}_{row_id}', row.asDict()
row_id += 1
return generate_fn
class __lowerCAmelCase ( _BaseExamplesIterable ):
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=None , ):
'''simple docstring'''
__UpperCamelCase = df
__UpperCamelCase = partition_order or range(self.df.rdd.getNumPartitions() )
__UpperCamelCase = _generate_iterable_examples(self.df , self.partition_order )
def __iter__( self ):
'''simple docstring'''
yield from self.generate_examples_fn()
def UpperCAmelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = list(range(self.df.rdd.getNumPartitions() ) )
generator.shuffle(__UpperCAmelCase )
return SparkExamplesIterable(self.df , partition_order=__UpperCAmelCase )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = self.split_shard_indices_by_worker(__UpperCAmelCase , __UpperCAmelCase )
return SparkExamplesIterable(self.df , partition_order=__UpperCAmelCase )
@property
def UpperCAmelCase ( self ):
'''simple docstring'''
return len(self.partition_order )
class __lowerCAmelCase ( datasets.DatasetBuilder ):
lowercase = SparkConfig
def __init__( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = None , **__UpperCAmelCase , ):
'''simple docstring'''
import pyspark
__UpperCamelCase = pyspark.sql.SparkSession.builder.getOrCreate()
__UpperCamelCase = df
__UpperCamelCase = working_dir
super().__init__(
cache_dir=__UpperCAmelCase , config_name=str(self.df.semanticHash() ) , **__UpperCAmelCase , )
def UpperCAmelCase ( self ):
'''simple docstring'''
def create_cache_and_write_probe(__UpperCAmelCase ):
# makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
# already exist.
os.makedirs(self._cache_dir , exist_ok=__UpperCAmelCase )
__UpperCamelCase = os.path.join(self._cache_dir , 'fs_test' + uuid.uuida().hex )
# Opening the file in append mode will create a new file unless it already exists, in which case it will not
# change the file contents.
open(__UpperCAmelCase , 'a' )
return [probe_file]
if self._spark.conf.get('spark.master' , '' ).startswith('local' ):
return
# If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
# accessible to the driver.
# TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
if self._cache_dir:
__UpperCamelCase = (
self._spark.sparkContext.parallelize(range(1 ) , 1 ).mapPartitions(__UpperCAmelCase ).collect()
)
if os.path.isfile(probe[0] ):
return
raise ValueError(
'When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir' )
def UpperCAmelCase ( self ):
'''simple docstring'''
return datasets.DatasetInfo(features=self.config.features )
def UpperCAmelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
return [datasets.SplitGenerator(name=datasets.Split.TRAIN )]
def UpperCAmelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
import pyspark
def get_arrow_batch_size(__UpperCAmelCase ):
for batch in it:
yield pa.RecordBatch.from_pydict({'batch_bytes': [batch.nbytes]} )
__UpperCamelCase = self.df.count()
__UpperCamelCase = df_num_rows if df_num_rows <= 100 else 100
# Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
__UpperCamelCase = (
self.df.limit(__UpperCAmelCase )
.repartition(1 )
.mapInArrow(__UpperCAmelCase , 'batch_bytes: long' )
.agg(pyspark.sql.functions.sum('batch_bytes' ).alias('sample_bytes' ) )
.collect()[0]
.sample_bytes
/ sample_num_rows
)
__UpperCamelCase = approx_bytes_per_row * df_num_rows
if approx_total_size > max_shard_size:
# Make sure there is at least one row per partition.
__UpperCamelCase = min(__UpperCAmelCase , int(approx_total_size / max_shard_size ) )
__UpperCamelCase = self.df.repartition(__UpperCAmelCase )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ):
'''simple docstring'''
import pyspark
__UpperCamelCase = ParquetWriter if file_format == 'parquet' else ArrowWriter
__UpperCamelCase = os.path.join(self._working_dir , os.path.basename(__UpperCAmelCase ) ) if self._working_dir else fpath
__UpperCamelCase = file_format == 'parquet'
# Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
# pickling the SparkContext.
__UpperCamelCase = self.config.features
__UpperCamelCase = self._writer_batch_size
__UpperCamelCase = self._fs.storage_options
def write_arrow(__UpperCAmelCase ):
# Within the same SparkContext, no two task attempts will share the same attempt ID.
__UpperCamelCase = pyspark.TaskContext().taskAttemptId()
__UpperCamelCase = next(__UpperCAmelCase , __UpperCAmelCase )
if first_batch is None:
# Some partitions might not receive any data.
return pa.RecordBatch.from_arrays(
[[task_id], [0], [0]] , names=['task_id', 'num_examples', 'num_bytes'] , )
__UpperCamelCase = 0
__UpperCamelCase = writer_class(
features=__UpperCAmelCase , path=working_fpath.replace('SSSSS' , F'{shard_id:05d}' ).replace('TTTTT' , F'{task_id:05d}' ) , writer_batch_size=__UpperCAmelCase , storage_options=__UpperCAmelCase , embed_local_files=__UpperCAmelCase , )
__UpperCamelCase = pa.Table.from_batches([first_batch] )
writer.write_table(__UpperCAmelCase )
for batch in it:
if max_shard_size is not None and writer._num_bytes >= max_shard_size:
__UpperCamelCase , __UpperCamelCase = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]] , names=['task_id', 'num_examples', 'num_bytes'] , )
shard_id += 1
__UpperCamelCase = writer_class(
features=writer._features , path=working_fpath.replace('SSSSS' , F'{shard_id:05d}' ).replace('TTTTT' , F'{task_id:05d}' ) , writer_batch_size=__UpperCAmelCase , storage_options=__UpperCAmelCase , embed_local_files=__UpperCAmelCase , )
__UpperCamelCase = pa.Table.from_batches([batch] )
writer.write_table(__UpperCAmelCase )
if writer._num_bytes > 0:
__UpperCamelCase , __UpperCamelCase = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]] , names=['task_id', 'num_examples', 'num_bytes'] , )
if working_fpath != fpath:
for file in os.listdir(os.path.dirname(__UpperCAmelCase ) ):
__UpperCamelCase = os.path.join(os.path.dirname(__UpperCAmelCase ) , os.path.basename(__UpperCAmelCase ) )
shutil.move(__UpperCAmelCase , __UpperCAmelCase )
__UpperCamelCase = (
self.df.mapInArrow(__UpperCAmelCase , 'task_id: long, num_examples: long, num_bytes: long' )
.groupBy('task_id' )
.agg(
pyspark.sql.functions.sum('num_examples' ).alias('total_num_examples' ) , pyspark.sql.functions.sum('num_bytes' ).alias('total_num_bytes' ) , pyspark.sql.functions.count('num_bytes' ).alias('num_shards' ) , pyspark.sql.functions.collect_list('num_examples' ).alias('shard_lengths' ) , )
.collect()
)
for row in stats:
yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = "arrow" , __UpperCAmelCase = None , __UpperCAmelCase = None , **__UpperCAmelCase , ):
'''simple docstring'''
self._validate_cache_dir()
__UpperCamelCase = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE )
self._repartition_df_if_needed(__UpperCAmelCase )
__UpperCamelCase = not is_remote_filesystem(self._fs )
__UpperCamelCase = os.path.join if is_local else posixpath.join
__UpperCamelCase = '-TTTTT-SSSSS-of-NNNNN'
__UpperCamelCase = F'{self.name}-{split_generator.name}{SUFFIX}.{file_format}'
__UpperCamelCase = path_join(self._output_dir , __UpperCAmelCase )
__UpperCamelCase = 0
__UpperCamelCase = 0
__UpperCamelCase = 0
__UpperCamelCase = []
__UpperCamelCase = []
for task_id, content in self._prepare_split_single(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
(
(
__UpperCamelCase
) , (
__UpperCamelCase
) , (
__UpperCamelCase
) , (
__UpperCamelCase
) ,
) = content
if num_bytes > 0:
total_num_examples += num_examples
total_num_bytes += num_bytes
total_shards += num_shards
task_id_and_num_shards.append((task_id, num_shards) )
all_shard_lengths.extend(__UpperCAmelCase )
__UpperCamelCase = total_num_examples
__UpperCamelCase = total_num_bytes
# should rename everything at the end
logger.debug(F'Renaming {total_shards} shards.' )
if total_shards > 1:
__UpperCamelCase = all_shard_lengths
# Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
# pickling error due to pickling the SparkContext.
__UpperCamelCase = self._fs
# use the -SSSSS-of-NNNNN pattern
def _rename_shard(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ):
rename(
__UpperCAmelCase , fpath.replace('SSSSS' , F'{shard_id:05d}' ).replace('TTTTT' , F'{task_id:05d}' ) , fpath.replace('TTTTT-SSSSS' , F'{global_shard_id:05d}' ).replace('NNNNN' , F'{total_shards:05d}' ) , )
__UpperCamelCase = []
__UpperCamelCase = 0
for i in range(len(__UpperCAmelCase ) ):
__UpperCamelCase , __UpperCamelCase = task_id_and_num_shards[i]
for shard_id in range(__UpperCAmelCase ):
args.append([task_id, shard_id, global_shard_id] )
global_shard_id += 1
self._spark.sparkContext.parallelize(__UpperCAmelCase , len(__UpperCAmelCase ) ).map(lambda __UpperCAmelCase : _rename_shard(*__UpperCAmelCase ) ).collect()
else:
# don't use any pattern
__UpperCamelCase = 0
__UpperCamelCase = task_id_and_num_shards[0][0]
self._rename(
fpath.replace('SSSSS' , F'{shard_id:05d}' ).replace('TTTTT' , F'{task_id:05d}' ) , fpath.replace(__UpperCAmelCase , '' ) , )
def UpperCAmelCase ( self , __UpperCAmelCase , ):
'''simple docstring'''
return SparkExamplesIterable(self.df )
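# Usage sketch (the public entry point `datasets.Dataset.from_spark` routes
# through this builder; the DataFrame and cache path below are illustrative):
#
#     from datasets import Dataset
#     df = spark.createDataFrame([(i, f"row {i}") for i in range(100)], "id: long, text: string")
#     ds = Dataset.from_spark(df, cache_dir="/nfs/shared_cache")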
"""simple docstring"""
import argparse
import os
import torch
from transformers import FlavaImageCodebook, FlavaImageCodebookConfig
def A ( snake_case :Union[str, Any] , snake_case :Any , snake_case :Union[str, Any] , snake_case :Any ) -> str:
__UpperCamelCase = s.rsplit(snake_case , snake_case )
return new.join(snake_case )
def A ( snake_case :List[Any] ) -> int:
# encoder.embeddings are double copied in original FLAVA
return sum(param.float().sum() if 'encoder.embeddings' not in key else 0 for key, param in state_dict.items() )
def A ( snake_case :str ) -> Union[str, Any]:
__UpperCamelCase = {}
__UpperCamelCase = ['group_1', 'group_2', 'group_3', 'group_4']
for key, value in state_dict.items():
for group_key in group_keys:
if group_key in key:
__UpperCamelCase = key.replace(f'{group_key}.' , f'{group_key}.group.' )
if "res_path" in key:
__UpperCamelCase = key.replace('res_path.' , 'res_path.path.' )
if key.endswith('.w' ):
__UpperCamelCase = rreplace(snake_case , '.w' , '.weight' , 1 )
if key.endswith('.b' ):
__UpperCamelCase = rreplace(snake_case , '.b' , '.bias' , 1 )
__UpperCamelCase = value.float()
return upgrade
@torch.no_grad()
def A ( snake_case :List[str] , snake_case :Tuple , snake_case :List[Any]=None , snake_case :str=True ) -> int:
from dall_e import Encoder
__UpperCamelCase = Encoder()
if os.path.exists(snake_case ):
__UpperCamelCase = torch.load(snake_case )
else:
__UpperCamelCase = torch.hub.load_state_dict_from_url(snake_case )
if isinstance(snake_case , snake_case ):
__UpperCamelCase = ckpt.state_dict()
encoder.load_state_dict(snake_case )
if config_path is not None:
__UpperCamelCase = FlavaImageCodebookConfig.from_pretrained(snake_case )
else:
__UpperCamelCase = FlavaImageCodebookConfig()
__UpperCamelCase = FlavaImageCodebook(snake_case ).eval()
__UpperCamelCase = encoder.state_dict()
__UpperCamelCase = upgrade_state_dict(snake_case )
hf_model.load_state_dict(snake_case )
__UpperCamelCase = hf_model.state_dict()
__UpperCamelCase = count_parameters(snake_case )
__UpperCamelCase = count_parameters(snake_case )
assert torch.allclose(snake_case , snake_case , atol=1e-3 )
if save_checkpoint:
hf_model.save_pretrained(snake_case )
else:
return hf_state_dict
if __name__ == "__main__":
UpperCamelCase : Any = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to flava checkpoint")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
UpperCamelCase : int = parser.parse_args()
convert_dalle_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
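# Command-line sketch (the script name and checkpoint location are illustrative;
# any path or URL accepted by `torch.load`/`torch.hub` works):
#
#     python convert_dalle_to_flava_codebook.py \
#         --checkpoint_path https://cdn.openai.com/dall-e/encoder.pkl \
#         --pytorch_dump_folder_path ./flava-image-codebook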
"""simple docstring"""
from .data_collator import (
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForSeqaSeq,
DataCollatorForSOP,
DataCollatorForTokenClassification,
DataCollatorForWholeWordMask,
DataCollatorWithPadding,
DefaultDataCollator,
default_data_collator,
)
from .metrics import glue_compute_metrics, xnli_compute_metrics
from .processors import (
DataProcessor,
InputExample,
InputFeatures,
SingleSentenceClassificationProcessor,
SquadExample,
SquadFeatures,
SquadVaProcessor,
SquadVaProcessor,
glue_convert_examples_to_features,
glue_output_modes,
glue_processors,
glue_tasks_num_labels,
squad_convert_examples_to_features,
xnli_output_modes,
xnli_processors,
xnli_tasks_num_labels,
)
"""simple docstring"""
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
UpperCAmelCase__ = argparse.ArgumentParser()
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
parser.add_argument(
"""--txt2img_unclip""",
default="""kakaobrain/karlo-v1-alpha""",
type=str,
required=False,
help="""The pretrained txt2img unclip.""",
)
UpperCAmelCase__ = parser.parse_args()
UpperCAmelCase__ = UnCLIPPipeline.from_pretrained(args.txtaimg_unclip)
UpperCAmelCase__ = CLIPImageProcessor()
UpperCAmelCase__ = CLIPVisionModelWithProjection.from_pretrained("""openai/clip-vit-large-patch14""")
UpperCAmelCase__ = UnCLIPImageVariationPipeline(
decoder=txtaimg.decoder,
text_encoder=txtaimg.text_encoder,
tokenizer=txtaimg.tokenizer,
text_proj=txtaimg.text_proj,
feature_extractor=feature_extractor,
image_encoder=image_encoder,
super_res_first=txtaimg.super_res_first,
super_res_last=txtaimg.super_res_last,
decoder_scheduler=txtaimg.decoder_scheduler,
super_res_scheduler=txtaimg.super_res_scheduler,
)
imgaimg.save_pretrained(args.dump_path)
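# Command-line sketch (the script name and output path are illustrative):
#
#     python convert_unclip_txt2img_to_image_variation.py --dump_path ./unclip-image-variation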
"""simple docstring"""
# Usage:
# ./gen-card-allenai-wmt16.py
import os
from pathlib import Path
def _snake_case ( lowercase__ , lowercase__ , lowercase__ , lowercase__ ):
_lowerCamelCase : int = {
'en': 'Machine learning is great, isn\'t it?',
'ru': 'Машинное обучение - это здорово, не так ли?',
'de': 'Maschinelles Lernen ist großartig, nicht wahr?',
}
# BLUE scores as follows:
# "pair": [fairseq, transformers]
_lowerCamelCase : List[Any] = {
'wmt16-en-de-dist-12-1': [2_8.3, 2_7.5_2],
'wmt16-en-de-dist-6-1': [2_7.4, 2_7.1_1],
'wmt16-en-de-12-1': [2_6.9, 2_5.7_5],
}
_lowerCamelCase : str = f'''{src_lang}-{tgt_lang}'''
_lowerCamelCase : Tuple = f'''
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt16
- allenai
license: apache-2.0
datasets:
- wmt16
metrics:
- bleu
---
# FSMT
## Model description
This is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.
For more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).
All 3 models are available:
* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)
* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)
* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)
## Intended uses & limitations
#### How to use
```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = "allenai/{model_name}"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)
input = "{texts[src_lang]}"
input_ids = tokenizer.encode(input, return_tensors="pt")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # {texts[tgt_lang]}
```
#### Limitations and bias
## Training data
Pretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369).
## Eval results
Here are the BLEU scores:
model | fairseq | transformers
-------|---------|----------
{model_name} | {scores[model_name][0]} | {scores[model_name][1]}
The score is slightly below the score reported in the paper, as the researchers don\'t use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs.
The score was calculated using this code:
```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=5
mkdir -p $DATA_DIR
sacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
## Data Sources
- [training, etc.](http://www.statmt.org/wmt16/)
- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)
### BibTeX entry and citation info
```
@misc{{kasai2020deep,
title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},
author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},
year={{2020}},
eprint={{2006.10369}},
archivePrefix={{arXiv}},
primaryClass={{cs.CL}}
}}
```
'''
    model_card_dir.mkdir(parents=True, exist_ok=True)
    path = os.path.join(model_card_dir, "README.md")
    print(f"Generating {path}")
    with open(path, "w", encoding="utf-8") as f:
        f.write(readme)


# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / "model_cards"

for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]:
    model_card_dir = model_cards_dir / "allenai" / model_name
    write_model_card(model_card_dir, src_lang="en", tgt_lang="de", model_name=model_name)
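# Output sketch: each iteration writes one card, e.g.
# model_cards/allenai/wmt16-en-de-dist-12-1/README.md, filling the template
# above with that model's language pair and BLEU scores.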
"""simple docstring"""
def _snake_case ( lowercase__ , lowercase__ ):
# "extended trapezoidal rule"
# int(f) = dx/2 * (f1 + 2f2 + ... + fn)
_lowerCamelCase : List[Any] = (boundary[1] - boundary[0]) / steps
_lowerCamelCase : Tuple = boundary[0]
_lowerCamelCase : Dict = boundary[1]
_lowerCamelCase : List[Any] = make_points(lowercase__ , lowercase__ , lowercase__ )
_lowerCamelCase : List[Any] = 0.0
y += (h / 2.0) * f(lowercase__ )
for i in x_i:
# print(i)
y += h * f(lowercase__ )
y += (h / 2.0) * f(lowercase__ )
return y
def _snake_case ( lowercase__ , lowercase__ , lowercase__ ):
_lowerCamelCase : str = a + h
while x < (b - h):
yield x
_lowerCamelCase : int = x + h
def _snake_case ( lowercase__ ): # enter your function here
_lowerCamelCase : Optional[Any] = (x - 0) * (x - 0)
return y
def _snake_case ( ):
_lowerCamelCase : int = 0.0 # Lower bound of integration
_lowerCamelCase : Optional[int] = 1.0 # Upper bound of integration
_lowerCamelCase : List[str] = 1_0.0 # define number of steps or resolution
_lowerCamelCase : List[Any] = [a, b] # define boundary of integration
_lowerCamelCase : Optional[Any] = method_a(lowercase__ , lowercase__ )
print(f'''y = {y}''' )
if __name__ == "__main__":
main()
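# Worked check (a sketch): for f(x) = x^2 on [0, 1] the exact integral is
# 1/3 ≈ 0.3333; with h = 0.1 the rule gives
#     0.05*f(0) + 0.1*(f(0.1) + ... + f(0.9)) + 0.05*f(1) = 0.335,
# and the O(h^2) error shrinks by ~4x each time the step count doubles.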
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_table_transformer": [
        "TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "TableTransformerConfig",
        "TableTransformerOnnxConfig",
    ]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_table_transformer"] = [
        "TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TableTransformerForObjectDetection",
        "TableTransformerModel",
        "TableTransformerPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_table_transformer import (
        TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        TableTransformerConfig,
        TableTransformerOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_table_transformer import (
            TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TableTransformerForObjectDetection,
            TableTransformerModel,
            TableTransformerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
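# Note on behavior (a sketch): at import time only `_import_structure` is built;
# `_LazyModule` replaces the module object in `sys.modules`, so the torch-backed
# classes are only imported the first time an attribute such as
# `TableTransformerModel` is actually accessed.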
"""A TF 2.0 adaptive softmax for the Transformer-XL model."""
import tensorflow as tf

from ...tf_utils import shape_list


class TFAdaptiveSoftmaxMask(tf.keras.layers.Layer):
    def __init__(self, vocab_size, d_embed, d_proj, cutoffs, div_val=1, keep_order=False, **kwargs):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.d_embed = d_embed
        self.d_proj = d_proj

        self.cutoffs = cutoffs + [vocab_size]
        self.cutoff_ends = [0] + self.cutoffs
        self.div_val = div_val

        self.shortlist_size = self.cutoffs[0]
        self.n_clusters = len(self.cutoffs) - 1
        self.head_size = self.shortlist_size + self.n_clusters

        self.keep_order = keep_order

        self.out_layers = []
        self.out_projs = []

    def build(self, input_shape):
        if self.n_clusters > 0:
            self.cluster_weight = self.add_weight(
                shape=(self.n_clusters, self.d_embed), initializer="zeros", trainable=True, name="cluster_weight"
            )
            self.cluster_bias = self.add_weight(
                shape=(self.n_clusters,), initializer="zeros", trainable=True, name="cluster_bias"
            )

        if self.div_val == 1:
            for i in range(len(self.cutoffs)):
                if self.d_proj != self.d_embed:
                    proj = self.add_weight(
                        shape=(self.d_embed, self.d_proj),
                        initializer="zeros",
                        trainable=True,
                        name=f"out_projs_._{i}",
                    )
                    self.out_projs.append(proj)
                else:
                    self.out_projs.append(None)
                weight = self.add_weight(
                    shape=(self.vocab_size, self.d_embed),
                    initializer="zeros",
                    trainable=True,
                    name=f"out_layers_._{i}_._weight",
                )
                bias = self.add_weight(
                    shape=(self.vocab_size,),
                    initializer="zeros",
                    trainable=True,
                    name=f"out_layers_._{i}_._bias",
                )
                self.out_layers.append((weight, bias))
        else:
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                d_emb_i = self.d_embed // (self.div_val**i)

                proj = self.add_weight(
                    shape=(d_emb_i, self.d_proj), initializer="zeros", trainable=True, name=f"out_projs_._{i}"
                )
                self.out_projs.append(proj)
                weight = self.add_weight(
                    shape=(r_idx - l_idx, d_emb_i),
                    initializer="zeros",
                    trainable=True,
                    name=f"out_layers_._{i}_._weight",
                )
                bias = self.add_weight(
                    shape=(r_idx - l_idx,),
                    initializer="zeros",
                    trainable=True,
                    name=f"out_layers_._{i}_._bias",
                )
                self.out_layers.append((weight, bias))
        super().build(input_shape)

    @staticmethod
    def _logit(x, W, b, proj=None):
        y = x
        if proj is not None:
            y = tf.einsum("ibd,ed->ibe", y, proj)
        return tf.einsum("ibd,nd->ibn", y, W) + b

    @staticmethod
    def _gather_logprob(logprob, target):
        lp_size = shape_list(logprob)
        r = tf.range(lp_size[0], dtype=target.dtype)
        idx = tf.stack([r, target], 1)
        return tf.gather_nd(logprob, idx)

    def call(self, hidden, target, return_mean=True, training=False):
        head_logprob = 0
        if self.n_clusters == 0:
            output = self._logit(hidden, self.out_layers[0][0], self.out_layers[0][1], self.out_projs[0])
            if target is not None:
                loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=target, logits=output)
            out = tf.nn.log_softmax(output, axis=-1)
        else:
            hidden_sizes = shape_list(hidden)
            out = []
            loss = tf.zeros(hidden_sizes[:2])
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                if target is not None:
                    mask = (target >= l_idx) & (target < r_idx)
                    mask_idx = tf.where(mask)
                    cur_target = tf.boolean_mask(target, mask) - l_idx

                if self.div_val == 1:
                    cur_W = self.out_layers[0][0][l_idx:r_idx]
                    cur_b = self.out_layers[0][1][l_idx:r_idx]
                else:
                    cur_W = self.out_layers[i][0]
                    cur_b = self.out_layers[i][1]

                if i == 0:
                    cur_W = tf.concat([cur_W, self.cluster_weight], 0)
                    cur_b = tf.concat([cur_b, self.cluster_bias], 0)

                    head_logit = self._logit(hidden, cur_W, cur_b, self.out_projs[0])
                    head_logprob = tf.nn.log_softmax(head_logit)
                    out.append(head_logprob[..., : self.cutoffs[0]])
                    if target is not None:
                        cur_head_logprob = tf.boolean_mask(head_logprob, mask)
                        cur_logprob = self._gather_logprob(cur_head_logprob, cur_target)
                else:
                    tail_logit = self._logit(hidden, cur_W, cur_b, self.out_projs[i])
                    tail_logprob = tf.nn.log_softmax(tail_logit)
                    cluster_prob_idx = self.cutoffs[0] + i - 1  # No probability for the head cluster
                    logprob_i = head_logprob[..., cluster_prob_idx, None] + tail_logprob
                    out.append(logprob_i)
                    if target is not None:
                        cur_head_logprob = tf.boolean_mask(head_logprob, mask)
                        cur_tail_logprob = tf.boolean_mask(tail_logprob, mask)
                        cur_logprob = self._gather_logprob(cur_tail_logprob, cur_target)
                        cur_logprob += cur_head_logprob[:, self.cutoff_ends[1] + i - 1]
                if target is not None:
                    loss += tf.scatter_nd(mask_idx, -cur_logprob, shape_list(loss))
            out = tf.concat(out, axis=-1)

        if target is not None:
            if return_mean:
                loss = tf.reduce_mean(loss)
            # Add the training-time loss value to the layer using `self.add_loss()`.
            self.add_loss(loss)

            # Log the loss as a metric (we could log arbitrary metrics,
            # including different metrics for training and inference).
            self.add_metric(loss, name=self.name, aggregation="mean" if return_mean else "")

        return out
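# Shape sketch (illustrative numbers): with vocab_size=10, cutoffs=[5], div_val=1
# and d_proj == d_embed, the head scores tokens 0-4 plus one cluster logit and the
# tail scores tokens 5-9, so `call` returns full log-probabilities over the vocab.
#
#     layer = TFAdaptiveSoftmaxMask(vocab_size=10, d_embed=8, d_proj=8, cutoffs=[5])
#     logprob = layer(tf.random.normal([7, 2, 8]), target=None)   # shape [7, 2, 10]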
"""Fine-tuning the library models for named entity recognition on CoNLL-2003."""
import logging
import os
import sys
from dataclasses import dataclass, field
from importlib import import_module
from typing import Dict, List, Optional, Tuple

import numpy as np
from seqeval.metrics import accuracy_score, f1_score, precision_score, recall_score
from torch import nn
from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask

import transformers
from transformers import (
    AutoConfig,
    AutoModelForTokenClassification,
    AutoTokenizer,
    DataCollatorWithPadding,
    EvalPrediction,
    HfArgumentParser,
    Trainer,
    TrainingArguments,
    set_seed,
)
from transformers.trainer_utils import is_main_process


logger = logging.getLogger(__name__)


@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/tokenizer we are going to fine-tune from."""

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    task_type: Optional[str] = field(
        default="NER", metadata={"help": "Task type to fine tune in training (e.g. NER, POS, etc)"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    use_fast: bool = field(default=False, metadata={"help": "Set this flag to use fast tokenization."})
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )


@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval."""

    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task."}
    )
    labels: Optional[str] = field(
        default=None,
        metadata={"help": "Path to a file containing all labels. If not specified, CoNLL-2003 labels are used."},
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )


def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome."
        )

    module = import_module("tasks")
    try:
        token_classification_task_clazz = getattr(module, model_args.task_type)
        token_classification_task: TokenClassificationTask = token_classification_task_clazz()
    except AttributeError:
        raise ValueError(
            f"Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. "
            f"Available tasks classes are: {TokenClassificationTask.__subclasses__()}"
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.local_rank != -1),
        training_args.fp16,
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed
    set_seed(training_args.seed)

    # Prepare CONLL-2003 task
    labels = token_classification_task.get_labels(data_args.labels)
    label_map: Dict[int, str] = dict(enumerate(labels))
    num_labels = len(labels)

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        id2label=label_map,
        label2id={label: i for i, label in enumerate(labels)},
        cache_dir=model_args.cache_dir,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast,
    )
    model = AutoModelForTokenClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
    )

    # Get datasets
    train_dataset = (
        TokenClassificationDataset(
            token_classification_task=token_classification_task,
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            labels=labels,
            model_type=config.model_type,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.train,
        )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        TokenClassificationDataset(
            token_classification_task=token_classification_task,
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            labels=labels,
            model_type=config.model_type,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.dev,
        )
        if training_args.do_eval
        else None
    )

    def align_predictions(predictions: np.ndarray, label_ids: np.ndarray) -> Tuple[List[int], List[int]]:
        preds = np.argmax(predictions, axis=2)

        batch_size, seq_len = preds.shape
        out_label_list = [[] for _ in range(batch_size)]
        preds_list = [[] for _ in range(batch_size)]

        for i in range(batch_size):
            for j in range(seq_len):
                if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index:
                    out_label_list[i].append(label_map[label_ids[i][j]])
                    preds_list[i].append(label_map[preds[i][j]])

        return preds_list, out_label_list

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds_list, out_label_list = align_predictions(p.predictions, p.label_ids)
        return {
            "accuracy_score": accuracy_score(out_label_list, preds_list),
            "precision": precision_score(out_label_list, preds_list),
            "recall": recall_score(out_label_list, preds_list),
            "f1": f1_score(out_label_list, preds_list),
        }

    # Data collator
    data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) if training_args.fp16 else None

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
        data_collator=data_collator,
    )

    # Training
    if training_args.do_train:
        trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None
        )
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_process_zero():
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        result = trainer.evaluate()

        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")
        if trainer.is_world_process_zero():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in result.items():
                    logger.info("  %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))

            results.update(result)

    # Predict
    if training_args.do_predict:
        test_dataset = TokenClassificationDataset(
            token_classification_task=token_classification_task,
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            labels=labels,
            model_type=config.model_type,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.test,
        )

        predictions, label_ids, metrics = trainer.predict(test_dataset)
        preds_list, _ = align_predictions(predictions, label_ids)

        output_test_results_file = os.path.join(training_args.output_dir, "test_results.txt")
        if trainer.is_world_process_zero():
            with open(output_test_results_file, "w") as writer:
                for key, value in metrics.items():
                    logger.info("  %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))

        # Save predictions
        output_test_predictions_file = os.path.join(training_args.output_dir, "test_predictions.txt")
        if trainer.is_world_process_zero():
            with open(output_test_predictions_file, "w") as writer:
                with open(os.path.join(data_args.data_dir, "test.txt"), "r") as f:
                    token_classification_task.write_predictions_to_file(writer, f, preds_list)

    return results


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
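# Command-line sketch (paths and model name are illustrative; data_dir must hold
# CoNLL-2003-formatted train/dev/test .txt files):
#
#     python run_ner.py \
#         --model_name_or_path bert-base-cased \
#         --data_dir ./conll2003 \
#         --output_dir ./ner-model \
#         --max_seq_length 128 \
#         --do_train --do_eval --do_predict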
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union

import torch
from torch import nn

from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging


logger = logging.get_logger(__name__)


class MultiControlNetModel(ModelMixin):
    r"""
    Multiple `ControlNetModel` wrapper class for Multi-ControlNet.
    """

    def __init__(self, controlnets: Union[List[ControlNetModel], Tuple[ControlNetModel]]):
        super().__init__()
        self.nets = nn.ModuleList(controlnets)

    def forward(
        self,
        sample: torch.FloatTensor,
        timestep: Union[torch.Tensor, float, int],
        encoder_hidden_states: torch.Tensor,
        controlnet_cond: List[torch.tensor],
        conditioning_scale: List[float],
        class_labels: Optional[torch.Tensor] = None,
        timestep_cond: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        cross_attention_kwargs: Optional[Dict[str, Any]] = None,
        guess_mode: bool = False,
        return_dict: bool = True,
    ) -> Union[ControlNetOutput, Tuple]:
        for i, (image, scale, controlnet) in enumerate(zip(controlnet_cond, conditioning_scale, self.nets)):
            down_samples, mid_sample = controlnet(
                sample,
                timestep,
                encoder_hidden_states,
                image,
                scale,
                class_labels,
                timestep_cond,
                attention_mask,
                cross_attention_kwargs,
                guess_mode,
                return_dict,
            )

            # merge samples
            if i == 0:
                down_block_res_samples, mid_block_res_sample = down_samples, mid_sample
            else:
                down_block_res_samples = [
                    samples_prev + samples_curr
                    for samples_prev, samples_curr in zip(down_block_res_samples, down_samples)
                ]
                mid_block_res_sample += mid_sample

        return down_block_res_samples, mid_block_res_sample

    def save_pretrained(
        self,
        save_directory: Union[str, os.PathLike],
        is_main_process: bool = True,
        save_function: Callable = None,
        safe_serialization: bool = False,
        variant: Optional[str] = None,
    ):
        idx = 0
        model_path_to_save = save_directory
        for controlnet in self.nets:
            controlnet.save_pretrained(
                model_path_to_save,
                is_main_process=is_main_process,
                save_function=save_function,
                safe_serialization=safe_serialization,
                variant=variant,
            )

            idx += 1
            model_path_to_save = model_path_to_save + f"_{idx}"

    @classmethod
    def from_pretrained(cls, pretrained_model_path: Optional[Union[str, os.PathLike]], **kwargs):
        idx = 0
        controlnets = []

        # load controlnet and append to list until no controlnet directory exists anymore
        # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_pretrained`
        # second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
        model_path_to_load = pretrained_model_path
        while os.path.isdir(model_path_to_load):
            controlnet = ControlNetModel.from_pretrained(model_path_to_load, **kwargs)
            controlnets.append(controlnet)

            idx += 1
            model_path_to_load = pretrained_model_path + f"_{idx}"

        logger.info(f"{len(controlnets)} controlnets loaded from {pretrained_model_path}.")

        if len(controlnets) == 0:
            raise ValueError(
                f"No ControlNets found under {os.path.dirname(pretrained_model_path)}. Expected at least {pretrained_model_path + '_0'}."
            )

        return cls(controlnets)
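# Usage sketch (checkpoint names are illustrative): the per-controlnet residuals
# are summed element-wise, so all controlnets must match the UNet's resolution.
#
#     from diffusers import ControlNetModel
#     canny = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny")
#     pose = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-openpose")
#     multi = MultiControlNetModel([canny, pose])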
import time
from dataclasses import dataclass
from multiprocessing import Pool
from unittest import TestCase
from unittest.mock import patch
import multiprocess
import numpy as np
import pytest
from datasets.utils.py_utils import (
NestedDataStructure,
asdict,
iflatmap_unordered,
map_nested,
temp_seed,
temporary_assignment,
zip_dict,
)
from .utils import require_tf, require_torch
def _a ( SCREAMING_SNAKE_CASE : List[Any] ): # picklable for multiprocessing
"""simple docstring"""
return x.sum()
def _a ( SCREAMING_SNAKE_CASE : Tuple ): # picklable for multiprocessing
"""simple docstring"""
return i + 1
@dataclass
class __magic_name__ :
A: int
A: str
class __magic_name__ ( __lowerCAmelCase):
def UpperCAmelCase__ ( self : List[Any] ) -> Tuple:
'''simple docstring'''
UpperCamelCase__ : Tuple = {}
UpperCamelCase__ : Optional[Any] = []
UpperCamelCase__ : int = 1
UpperCamelCase__ : List[str] = [1, 2]
UpperCamelCase__ : Union[str, Any] = {'''a''': 1, '''b''': 2}
UpperCamelCase__ : Tuple = {'''a''': [1, 2], '''b''': [3, 4]}
UpperCamelCase__ : Any = {'''a''': {'''1''': 1}, '''b''': 2}
UpperCamelCase__ : int = {'''a''': 1, '''b''': 2, '''c''': 3, '''d''': 4}
UpperCamelCase__ : Dict = {}
UpperCamelCase__ : List[str] = []
UpperCamelCase__ : List[Any] = 2
UpperCamelCase__ : Tuple = [2, 3]
UpperCamelCase__ : Any = {'''a''': 2, '''b''': 3}
UpperCamelCase__ : List[Any] = {'''a''': [2, 3], '''b''': [4, 5]}
UpperCamelCase__ : List[Any] = {'''a''': {'''1''': 2}, '''b''': 3}
UpperCamelCase__ : Union[str, Any] = {'''a''': 2, '''b''': 3, '''c''': 4, '''d''': 5}
self.assertEqual(map_nested(lowerCamelCase__ , lowerCamelCase__ ) , lowerCamelCase__ )
self.assertEqual(map_nested(lowerCamelCase__ , lowerCamelCase__ ) , lowerCamelCase__ )
self.assertEqual(map_nested(lowerCamelCase__ , lowerCamelCase__ ) , lowerCamelCase__ )
self.assertEqual(map_nested(lowerCamelCase__ , lowerCamelCase__ ) , lowerCamelCase__ )
self.assertEqual(map_nested(lowerCamelCase__ , lowerCamelCase__ ) , lowerCamelCase__ )
self.assertEqual(map_nested(lowerCamelCase__ , lowerCamelCase__ ) , lowerCamelCase__ )
self.assertEqual(map_nested(lowerCamelCase__ , lowerCamelCase__ ) , lowerCamelCase__ )
self.assertEqual(map_nested(lowerCamelCase__ , lowerCamelCase__ ) , lowerCamelCase__ )
UpperCamelCase__ : Optional[int] = 2
self.assertEqual(map_nested(lowerCamelCase__ , lowerCamelCase__ , num_proc=lowerCamelCase__ ) , lowerCamelCase__ )
self.assertEqual(map_nested(lowerCamelCase__ , lowerCamelCase__ , num_proc=lowerCamelCase__ ) , lowerCamelCase__ )
self.assertEqual(map_nested(lowerCamelCase__ , lowerCamelCase__ , num_proc=lowerCamelCase__ ) , lowerCamelCase__ )
self.assertEqual(map_nested(lowerCamelCase__ , lowerCamelCase__ , num_proc=lowerCamelCase__ ) , lowerCamelCase__ )
self.assertEqual(map_nested(lowerCamelCase__ , lowerCamelCase__ , num_proc=lowerCamelCase__ ) , lowerCamelCase__ )
self.assertEqual(map_nested(lowerCamelCase__ , lowerCamelCase__ , num_proc=lowerCamelCase__ ) , lowerCamelCase__ )
self.assertEqual(map_nested(lowerCamelCase__ , lowerCamelCase__ , num_proc=lowerCamelCase__ ) , lowerCamelCase__ )
self.assertEqual(map_nested(lowerCamelCase__ , lowerCamelCase__ , num_proc=lowerCamelCase__ ) , lowerCamelCase__ )
UpperCamelCase__ : Any = {'''a''': np.eye(2 ), '''b''': np.zeros(3 ), '''c''': np.ones(2 )}
UpperCamelCase__ : Optional[int] = {'''a''': 2, '''b''': 0, '''c''': 2}
UpperCamelCase__ : str = {
'''a''': np.eye(2 ).astype(lowerCamelCase__ ),
'''b''': np.zeros(3 ).astype(lowerCamelCase__ ),
'''c''': np.ones(2 ).astype(lowerCamelCase__ ),
}
self.assertEqual(map_nested(lowerCamelCase__ , lowerCamelCase__ , map_numpy=lowerCamelCase__ ) , lowerCamelCase__ )
self.assertEqual(
{k: v.tolist() for k, v in map_nested(lowerCamelCase__ , lowerCamelCase__ , map_numpy=lowerCamelCase__ ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
self.assertEqual(map_nested(lowerCamelCase__ , lowerCamelCase__ , map_numpy=lowerCamelCase__ , num_proc=lowerCamelCase__ ) , lowerCamelCase__ )
self.assertEqual(
{k: v.tolist() for k, v in map_nested(lowerCamelCase__ , lowerCamelCase__ , map_numpy=lowerCamelCase__ , num_proc=lowerCamelCase__ ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
with self.assertRaises(lowerCamelCase__ ): # can't pickle a local lambda
            map_nested(lambda x : x + 1 , lowerCamelCase__ , num_proc=lowerCamelCase__ )
def UpperCAmelCase__ ( self : List[str] ) -> Optional[int]:
'''simple docstring'''
UpperCamelCase__ : int = {'''a''': 1, '''b''': 2}
UpperCamelCase__ : List[Any] = {'''a''': 3, '''b''': 4}
UpperCamelCase__ : Union[str, Any] = {'''a''': 5, '''b''': 6}
UpperCamelCase__ : Tuple = sorted([('''a''', (1, 3, 5)), ('''b''', (2, 4, 6))] )
self.assertEqual(sorted(zip_dict(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) ) , lowerCamelCase__ )
def UpperCAmelCase__ ( self : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
        class Foo:
            my_attr = "bar"
        foo = Foo()
        self.assertEqual(foo.my_attr , '''bar''' )
        with temporary_assignment(foo , '''my_attr''' , '''BAR''' ):
self.assertEqual(foo.my_attr , '''BAR''' )
self.assertEqual(foo.my_attr , '''bar''' )
@pytest.mark.parametrize(
'''iterable_length, num_proc, expected_num_proc''' , [
(1, None, 1),
(1, 1, 1),
(2, None, 1),
(2, 1, 1),
(2, 2, 1),
(2, 3, 1),
(3, 2, 1),
(16, 16, 16),
(16, 17, 16),
(17, 16, 16),
] , )
def _a ( SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : List[str] ):
"""simple docstring"""
with patch('''datasets.utils.py_utils._single_map_nested''' ) as mock_single_map_nested, patch(
'''datasets.parallel.parallel.Pool''' ) as mock_multiprocessing_pool:
UpperCamelCase__ : str = {F"{i}": i for i in range(SCREAMING_SNAKE_CASE )}
        UpperCamelCase__ : Any = map_nested(lambda x : x + 10 , SCREAMING_SNAKE_CASE , num_proc=SCREAMING_SNAKE_CASE , parallel_min_length=16 )
if expected_num_proc == 1:
assert mock_single_map_nested.called
assert not mock_multiprocessing_pool.called
else:
assert not mock_single_map_nested.called
assert mock_multiprocessing_pool.called
assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc
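# Dispatch sketch for the rule exercised above: with `num_proc` set but fewer
# items than `parallel_min_length`, `map_nested` stays in-process (so even a
# non-picklable lambda is fine):
#   map_nested(lambda x: x + 10, {"a": 1, "b": 2}, num_proc=4, parallel_min_length=16)
#   # -> {"a": 11, "b": 12}, computed without spawning a worker pool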
class __magic_name__ ( TestCase):
@require_tf
def UpperCAmelCase__ ( self : Any ) -> str:
'''simple docstring'''
import tensorflow as tf
from tensorflow.keras import layers
UpperCamelCase__ : Any = layers.Dense(2 )
def gen_random_output():
UpperCamelCase__ : List[Any] = tf.random.uniform((1, 3) )
return model(lowerCamelCase__ ).numpy()
with temp_seed(42 , set_tensorflow=lowerCamelCase__ ):
UpperCamelCase__ : Optional[Any] = gen_random_output()
with temp_seed(42 , set_tensorflow=lowerCamelCase__ ):
UpperCamelCase__ : Dict = gen_random_output()
UpperCamelCase__ : int = gen_random_output()
np.testing.assert_equal(lowerCamelCase__ , lowerCamelCase__ )
self.assertGreater(np.abs(outa - outa ).sum() , 0 )
@require_torch
def UpperCAmelCase__ ( self : Dict ) -> Optional[int]:
'''simple docstring'''
import torch
def gen_random_output():
UpperCamelCase__ : Optional[Any] = torch.nn.Linear(3 , 2 )
UpperCamelCase__ : Optional[int] = torch.rand(1 , 3 )
return model(lowerCamelCase__ ).detach().numpy()
with temp_seed(42 , set_pytorch=lowerCamelCase__ ):
UpperCamelCase__ : Any = gen_random_output()
with temp_seed(42 , set_pytorch=lowerCamelCase__ ):
UpperCamelCase__ : int = gen_random_output()
UpperCamelCase__ : Union[str, Any] = gen_random_output()
np.testing.assert_equal(lowerCamelCase__ , lowerCamelCase__ )
self.assertGreater(np.abs(outa - outa ).sum() , 0 )
def UpperCAmelCase__ ( self : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
def gen_random_output():
return np.random.rand(1 , 3 )
with temp_seed(42 ):
UpperCamelCase__ : Tuple = gen_random_output()
with temp_seed(42 ):
UpperCamelCase__ : List[str] = gen_random_output()
UpperCamelCase__ : Optional[Any] = gen_random_output()
np.testing.assert_equal(lowerCamelCase__ , lowerCamelCase__ )
self.assertGreater(np.abs(outa - outa ).sum() , 0 )
@pytest.mark.parametrize('''input_data''' , [{}] )
def _a ( SCREAMING_SNAKE_CASE : Any ):
"""simple docstring"""
UpperCamelCase__ : str = NestedDataStructure(SCREAMING_SNAKE_CASE ).data
assert output_data == input_data
@pytest.mark.parametrize(
'''data, expected_output''' , [
({}, []),
([], []),
('''foo''', ['''foo''']),
(['''foo''', '''bar'''], ['''foo''', '''bar''']),
([['''foo''', '''bar''']], ['''foo''', '''bar''']),
([[['''foo'''], ['''bar''']]], ['''foo''', '''bar''']),
([[['''foo'''], '''bar''']], ['''foo''', '''bar''']),
({'''a''': 1, '''b''': 2}, [1, 2]),
({'''a''': [1, 2], '''b''': [3, 4]}, [1, 2, 3, 4]),
({'''a''': [[1, 2]], '''b''': [[3, 4]]}, [1, 2, 3, 4]),
({'''a''': [[1, 2]], '''b''': [3, 4]}, [1, 2, 3, 4]),
({'''a''': [[[1], [2]]], '''b''': [[[3], [4]]]}, [1, 2, 3, 4]),
({'''a''': [[[1], [2]]], '''b''': [[3, 4]]}, [1, 2, 3, 4]),
({'''a''': [[[1], [2]]], '''b''': [3, 4]}, [1, 2, 3, 4]),
({'''a''': [[[1], [2]]], '''b''': [3, [4]]}, [1, 2, 3, 4]),
({'''a''': {'''1''': 1}, '''b''': 2}, [1, 2]),
({'''a''': {'''1''': [1]}, '''b''': 2}, [1, 2]),
({'''a''': {'''1''': [1]}, '''b''': [2]}, [1, 2]),
] , )
def _a ( SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : Dict ):
"""simple docstring"""
UpperCamelCase__ : Optional[Any] = NestedDataStructure(SCREAMING_SNAKE_CASE ).flatten()
assert output == expected_output
def _a ( ):
"""simple docstring"""
UpperCamelCase__ : Union[str, Any] = A(x=1 , y='''foobar''' )
UpperCamelCase__ : Optional[Any] = {'''x''': 1, '''y''': '''foobar'''}
assert asdict(SCREAMING_SNAKE_CASE ) == expected_output
UpperCamelCase__ : Optional[int] = {'''a''': {'''b''': A(x=10 , y='''foo''' )}, '''c''': [A(x=20 , y='''bar''' )]}
UpperCamelCase__ : Tuple = {'''a''': {'''b''': {'''x''': 10, '''y''': '''foo'''}}, '''c''': [{'''x''': 20, '''y''': '''bar'''}]}
assert asdict(SCREAMING_SNAKE_CASE ) == expected_output
with pytest.raises(SCREAMING_SNAKE_CASE ):
asdict([1, A(x=10 , y='''foo''' )] )
def _a ( SCREAMING_SNAKE_CASE : str ):
"""simple docstring"""
return text.split()
def _a ( SCREAMING_SNAKE_CASE : int ):
"""simple docstring"""
yield (time.time(), content)
time.sleep(2 )
yield (time.time(), content)
def _a ( ):
"""simple docstring"""
with Pool(2 ) as pool:
UpperCamelCase__ : List[Any] = list(iflatmap_unordered(SCREAMING_SNAKE_CASE , _split_text , kwargs_iterable=[{'''text''': '''hello there'''}] * 10 ) )
assert out.count('''hello''' ) == 10
assert out.count('''there''' ) == 10
assert len(SCREAMING_SNAKE_CASE ) == 20
# check multiprocess from pathos (uses dill for pickling)
with multiprocess.Pool(2 ) as pool:
UpperCamelCase__ : Union[str, Any] = list(iflatmap_unordered(SCREAMING_SNAKE_CASE , _split_text , kwargs_iterable=[{'''text''': '''hello there'''}] * 10 ) )
assert out.count('''hello''' ) == 10
assert out.count('''there''' ) == 10
assert len(SCREAMING_SNAKE_CASE ) == 20
# check that we get items as fast as possible
with Pool(2 ) as pool:
UpperCamelCase__ : str = []
for yield_time, content in iflatmap_unordered(
SCREAMING_SNAKE_CASE , _aseconds_generator_of_aitems_with_timing , kwargs_iterable=[{'''content''': '''a'''}, {'''content''': '''b'''}] ):
            assert yield_time < time.time() + 0.1, "we should get each item directly after it was yielded"
out.append(SCREAMING_SNAKE_CASE )
assert out.count('''a''' ) == 2
assert out.count('''b''' ) == 2
assert len(SCREAMING_SNAKE_CASE ) == 4
| 369
|
from typing import List, Optional, Union
import numpy as np
import tensorflow as tf
from .utils import logging
__UpperCamelCase : List[Any] = logging.get_logger(__name__)
def shape_list( tensor : Union[tf.Tensor, np.ndarray] ):
    """simple docstring"""
    if isinstance(tensor , np.ndarray ):
        return list(tensor.shape )
    dynamic = tf.shape(tensor )
    if tensor.shape == tf.TensorShape(None ):
        return dynamic
    static = tensor.shape.as_list()
    return [dynamic[i] if s is None else s for i, s in enumerate(static )]
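# Behavior sketch for shape_list above:
#   - fully static input: plain Python ints, e.g. [2, 3] for np.zeros((2, 3))
#   - partially unknown shape: each None dimension is replaced by the matching
#     entry of tf.shape(tensor), so the result stays usable inside tf.function.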
def stable_softmax( logits : tf.Tensor , axis : Optional[int] = None , name : Optional[str] = None ):
    """simple docstring"""
    return tf.nn.softmax(logits=logits + 1E-9 , axis=axis , name=name )
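# Note on the +1e-9 shift in stable_softmax: it is a workaround for a TF/XLA
# softmax issue on CPU, and it sits far below float32 resolution for realistic
# logits, so the probabilities are unchanged in practice:
#   stable_softmax(tf.constant([[1.0, 2.0, 3.0]]))  # ~[[0.0900, 0.2447, 0.6652]]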
def functional_layernorm( inputs , weight , bias , epsilon=1E-5 , axis=-1 ):
    """simple docstring"""
    if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(axis , int ):
        raise NotImplementedError('''Only 1D weight and bias tensors are supported for now, with only a single axis.''' )
    # Get mean and variance on the axis to be normalized
    mean , variance = tf.nn.moments(inputs , axes=[axis] , keepdims=True )
    if axis != -1:
        # Reshape scale and weight to have the same rank as inputs, with size 1
        # on every dimension except `axis`
        shape = [1] * inputs.shape.rank
        shape[axis] = shape_list(inputs )[axis]
        weight = tf.reshape(weight , shape )
        bias = tf.reshape(bias , shape )
    # Compute layer normalization using the batch_normalization
    # function.
    outputs = tf.nn.batch_normalization(
        inputs , mean , variance , offset=bias , scale=weight , variance_epsilon=epsilon , )
    return outputs
def flatten( input , start_dim=0 , end_dim=-1 ):
    """simple docstring"""
    if end_dim < 0:
        end_dim += input.shape.rank
    if start_dim < 0:
        start_dim += input.shape.rank
    if start_dim == end_dim:
        return input
    in_shape = tf.shape(input )
    flattened_dim = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1] )
    out_shape = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]] , axis=0 )
    return tf.reshape(input , out_shape )
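# Shape sketch for flatten above, which mirrors torch.flatten semantics
# (merge dims start_dim..end_dim, keep the rest):
#   x = tf.zeros((2, 3, 4, 5))
#   flatten(x, start_dim=1, end_dim=2).shape  # (2, 12, 5)
#   flatten(x).shape                          # (120,) with the defaults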
def invert_attention_mask( encoder_attention_mask : tf.Tensor ):
    """simple docstring"""
    if not isinstance(encoder_attention_mask , tf.Tensor ):
        encoder_attention_mask = tf.convert_to_tensor(encoder_attention_mask )  # Catches stray NumPy inputs
    if encoder_attention_mask.shape.rank == 3:
        encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
    if encoder_attention_mask.shape.rank == 2:
        encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
    # T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
    # Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow
    # /transformer/transformer_layers.py#L270
    # encoder_extended_attention_mask = (encoder_extended_attention_mask ==
    # encoder_extended_attention_mask.transpose(-1, -2))
    encoder_extended_attention_mask = (
        tf.cast(1 , encoder_attention_mask.dtype ) - encoder_extended_attention_mask
    ) * encoder_extended_attention_mask.dtype.min
    return encoder_extended_attention_mask
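# Broadcasting sketch for invert_attention_mask above: a (batch, seq) mask of
# 1s/0s becomes a (batch, 1, 1, seq) additive bias with kept positions at 0 and
# masked positions at dtype.min:
#   invert_attention_mask(tf.constant([[1.0, 1.0, 0.0]]))
#   # -> [[[[0.0, 0.0, -3.4e38]]]], ready to add to raw attention scores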
def check_embeddings_within_bounds( tensor : tf.Tensor , embed_dim : int , tensor_name : str = "input_ids" ):
    """simple docstring"""
    tf.debugging.assert_less(
        tensor , tf.cast(embed_dim , dtype=tensor.dtype ) , message=(
            f"The maximum value of {tensor_name} ({tf.math.reduce_max(tensor )}) must be smaller than the embedding "
            f"layer's input dimension ({embed_dim}). The likely cause is some problem at tokenization time."
        ) , )
def save_attributes_to_hdf5_group( group , name , data ):
    """simple docstring"""
    HDF5_OBJECT_HEADER_LIMIT = 64512
    # Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT`
    # because in that case even chunking the array would not make the saving
    # possible.
    bad_attributes = [x for x in data if len(x ) > HDF5_OBJECT_HEADER_LIMIT]
    # Expecting this to never be true.
    if bad_attributes:
        raise RuntimeError(
            '''The following attributes cannot be saved to HDF5 file because '''
            f"they are larger than {HDF5_OBJECT_HEADER_LIMIT} "
            f"bytes: {bad_attributes}" )
    data_npy = np.asarray(data )
    num_chunks = 1
    chunked_data = np.array_split(data_npy , num_chunks )
    # This will never loop forever thanks to the test above.
    while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data ):
        num_chunks += 1
        chunked_data = np.array_split(data_npy , num_chunks )
    if num_chunks > 1:
        for chunk_id, chunk_data in enumerate(chunked_data ):
            group.attrs["%s%d" % (name, chunk_id)] = chunk_data
    else:
        group.attrs[name] = data
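# Chunking sketch for save_attributes_to_hdf5_group above: HDF5 caps object
# headers at 64 KiB, so the per-attribute byte budget is set just below it
# (64512). A name list whose ndarray exceeds the budget is split across
# "name0", "name1", ... attributes; load_attributes_from_hdf5_group below
# reassembles them by probing "%s%d" keys in order.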
def load_attributes_from_hdf5_group( group , name ):
    """simple docstring"""
    if name in group.attrs:
        data = [n.decode('''utf8''' ) if hasattr(n , '''decode''' ) else n for n in group.attrs[name]]
    else:
        data = []
        chunk_id = 0
        while "%s%d" % (name, chunk_id) in group.attrs:
            data.extend(
                [n.decode('''utf8''' ) if hasattr(n , '''decode''' ) else n for n in group.attrs['''%s%d''' % (name, chunk_id)]] )
            chunk_id += 1
    return data
def expand_1d( data ):
    """simple docstring"""
    def _expand_single_1d_tensor(t ):
        if isinstance(t , tf.Tensor ) and t.shape.rank == 1:
            return tf.expand_dims(t , axis=-1 )
        return t
    return tf.nest.map_structure(_expand_single_1d_tensor , data )
| 51
| 0
|
from __future__ import annotations
import math
import numpy as np
from numpy.linalg import norm
def euclidean( input_a , input_b ):
    return math.sqrt(sum(pow(a - b , 2 ) for a, b in zip(input_a , input_b ) ) )
def similarity_search( dataset , value_array ):
    if dataset.ndim != value_array.ndim:
        msg = (
            '''Wrong input data\'s dimensions... '''
            f'''dataset : {dataset.ndim}, value_array : {value_array.ndim}'''
        )
        raise ValueError(msg )
    try:
        if dataset.shape[1] != value_array.shape[1]:
            msg = (
                '''Wrong input data\'s shape... '''
                f'''dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}'''
            )
            raise ValueError(msg )
    except IndexError:
        if dataset.ndim != value_array.ndim:
            raise TypeError('''Wrong shape''' )
    if dataset.dtype != value_array.dtype:
        msg = (
            '''Input data have different datatype... '''
            f'''dataset : {dataset.dtype}, value_array : {value_array.dtype}'''
        )
        raise TypeError(msg )
    answer = []
    for value in value_array:
        dist = euclidean(value , dataset[0] )
        vector = dataset[0].tolist()
        for dataset_value in dataset[1:]:
            temp_dist = euclidean(value , dataset_value )
            if dist > temp_dist:
                dist = temp_dist
                vector = dataset_value.tolist()
        answer.append([vector, dist] )
    return answer
def cosine_similarity( input_a , input_b ):
    return np.dot(input_a , input_b ) / (norm(input_a ) * norm(input_b ))
if __name__ == "__main__":
import doctest
doctest.testmod()
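# A quick worked example for the three helpers above:
#   dataset = np.array([[0.0, 0.0, 0.0], [1.0, 1.0, 1.0], [2.0, 2.0, 2.0]])
#   value_array = np.array([[0.0, 0.0, 0.1]])
#   similarity_search(dataset, value_array)  # [[[0.0, 0.0, 0.0], 0.1]]
#   cosine_similarity(np.array([1, 2]), np.array([6, 32]))  # ~0.9615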
| 186
|
from typing import Dict
from .base import GenericTensor, Pipeline
class FeatureExtractionPipeline( Pipeline ):
    """simple docstring"""
    def _sanitize_parameters( self , truncation=None , tokenize_kwargs=None , return_tensors=None , **kwargs ):
        '''simple docstring'''
        if tokenize_kwargs is None:
            tokenize_kwargs = {}
        if truncation is not None:
            if "truncation" in tokenize_kwargs:
                raise ValueError(
                    '''truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)''' )
            tokenize_kwargs['''truncation'''] = truncation
        preprocess_params = tokenize_kwargs
        postprocess_params = {}
        if return_tensors is not None:
            postprocess_params['''return_tensors'''] = return_tensors
        return preprocess_params, {}, postprocess_params
    def preprocess( self , inputs , **tokenize_kwargs )->Dict[str, GenericTensor]:
        '''simple docstring'''
        return_tensors = self.framework
        model_inputs = self.tokenizer(inputs , return_tensors=return_tensors , **tokenize_kwargs )
        return model_inputs
    def _forward( self , model_inputs ):
        '''simple docstring'''
        model_outputs = self.model(**model_inputs )
        return model_outputs
    def postprocess( self , model_outputs , return_tensors=False ):
        '''simple docstring'''
        if return_tensors:
            return model_outputs[0]
        if self.framework == "pt":
            return model_outputs[0].tolist()
        elif self.framework == "tf":
            return model_outputs[0].numpy().tolist()
    def __call__( self , *args , **kwargs ):
        '''simple docstring'''
        return super().__call__(*args , **kwargs )
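# Usage sketch: this pipeline is reachable through the "feature-extraction"
# task (the model id below is illustrative):
#   from transformers import pipeline
#   extractor = pipeline("feature-extraction", model="distilbert-base-uncased")
#   feats = extractor("This is a test")  # nested list: [batch][token][hidden_dim]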
| 186
| 1
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCAmelCase : int =logging.get_logger(__name__)
__lowerCAmelCase : Union[str, Any] ={
"""vinvino02/glpn-kitti""": """https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json""",
# See all GLPN models at https://huggingface.co/models?filter=glpn
}
class GLPNConfig( PretrainedConfig ):
    model_type = 'glpn'
    def __init__( self , num_channels=3 , num_encoder_blocks=4 , depths=[2, 2, 2, 2] , sr_ratios=[8, 4, 2, 1] , hidden_sizes=[32, 64, 160, 256] , patch_sizes=[7, 3, 3, 3] , strides=[4, 2, 2, 2] , num_attention_heads=[1, 2, 5, 8] , mlp_ratios=[4, 4, 4, 4] , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , drop_path_rate=0.1 , layer_norm_eps=1E-6 , decoder_hidden_size=64 , max_depth=10 , head_in_index=-1 , **kwargs , ):
        """simple docstring"""
        super().__init__(**kwargs )
        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.max_depth = max_depth
        self.head_in_index = head_in_index
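# Instantiation sketch: the defaults above mirror the vinvino02/glpn-kitti
# architecture.
#   config = GLPNConfig()
#   config.num_encoder_blocks  # 4
#   config.hidden_sizes        # [32, 64, 160, 256]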
| 362
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
__lowerCAmelCase : List[Any] ={
"""configuration_swiftformer""": [
"""SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""SwiftFormerConfig""",
"""SwiftFormerOnnxConfig""",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : Optional[Any] =[
"""SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""SwiftFormerForImageClassification""",
"""SwiftFormerModel""",
"""SwiftFormerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
import sys
__lowerCAmelCase : Optional[Any] =_LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 32
| 0
|
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..utils import cached_file
# docstyle-ignore
CHAT_MESSAGE_PROMPT = '''
Human: <<task>>
Assistant: '''
DEFAULT_PROMPTS_REPO = '''huggingface-tools/default-prompts'''
PROMPT_FILES = {'''chat''': '''chat_prompt_template.txt''', '''run''': '''run_prompt_template.txt'''}
def download_prompt(prompt_or_repo_id , agent_name , mode="run" ):
    if prompt_or_repo_id is None:
        prompt_or_repo_id = DEFAULT_PROMPTS_REPO
    # prompt is considered a repo ID when it does not contain any kind of space
    if re.search("""\\s""" , prompt_or_repo_id ) is not None:
        return prompt_or_repo_id
    prompt_file = cached_file(
        prompt_or_repo_id , PROMPT_FILES[mode] , repo_type="""dataset""" , user_agent={"""agent""": agent_name} )
    with open(prompt_file , """r""" , encoding="""utf-8""" ) as f:
        return f.read()
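# Usage sketch for download_prompt (the agent name below is illustrative):
#   download_prompt(None, "MyAgent", mode="chat")  # fetches the default chat template
#   download_prompt("Human: <<task>>\nAssistant:", "MyAgent")  # has whitespace -> returned as-is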
| 41
|
from itertools import zip_longest
import requests
from bs4 import BeautifulSoup
from pandas import DataFrame
def _SCREAMING_SNAKE_CASE ( lowercase : str = "laptop" ):
'''simple docstring'''
lowerCamelCase_ = f"""https://www.amazon.in/laptop/s?k={product}"""
lowerCamelCase_ = {
'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36\n (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36',
'Accept-Language': 'en-US, en;q=0.5',
}
lowerCamelCase_ = BeautifulSoup(requests.get(lowercase , headers=lowercase ).text )
# Initialize a Pandas dataframe with the column titles
lowerCamelCase_ = DataFrame(
columns=[
'Product Title',
'Product Link',
'Current Price of the product',
'Product Rating',
'MRP of the product',
'Discount',
] )
# Loop through each entry and store them in the dataframe
for item, _ in zip_longest(
soup.find_all(
'div' , attrs={'class': 's-result-item', 'data-component-type': 's-search-result'} , ) , soup.find_all('div' , attrs={'class': 'a-row a-size-base a-color-base'} ) , ):
try:
lowerCamelCase_ = item.ha.text
lowerCamelCase_ = 'https://www.amazon.in/' + item.ha.a['href']
lowerCamelCase_ = item.find('span' , attrs={'class': 'a-offscreen'} ).text
try:
lowerCamelCase_ = item.find('span' , attrs={'class': 'a-icon-alt'} ).text
except AttributeError:
lowerCamelCase_ = 'Not available'
try:
lowerCamelCase_ = (
'₹'
+ item.find(
'span' , attrs={'class': 'a-price a-text-price'} ).text.split('₹' )[1]
)
except AttributeError:
lowerCamelCase_ = ''
try:
lowerCamelCase_ = float(
(
(
float(product_mrp.strip('₹' ).replace(',' , '' ) )
- float(product_price.strip('₹' ).replace(',' , '' ) )
)
/ float(product_mrp.strip('₹' ).replace(',' , '' ) )
)
* 1_00 )
except ValueError:
lowerCamelCase_ = float('nan' )
except AttributeError:
pass
lowerCamelCase_ = [
product_title,
product_link,
product_price,
product_rating,
product_mrp,
discount,
]
lowerCamelCase_ = ' '
lowerCamelCase_ = ' '
data_frame.index += 1
return data_frame
if __name__ == "__main__":
lowerCamelCase : Tuple = "headphones"
get_amazon_product_data(product).to_csv(F"""Amazon Product Data for {product}.csv""")
| 204
| 0
|
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class UpperCamelCase_ ( unittest.TestCase):
"""simple docstring"""
def UpperCAmelCase_ ( self : Optional[Any] ) -> str:
__SCREAMING_SNAKE_CASE = "| <pad> <unk> <s> </s> a b c d e f g h i j k".split()
__SCREAMING_SNAKE_CASE = dict(zip(UpperCAmelCase__ , range(len(UpperCAmelCase__ ) ) ) )
__SCREAMING_SNAKE_CASE = {
"unk_token": "<unk>",
"bos_token": "<s>",
"eos_token": "</s>",
}
__SCREAMING_SNAKE_CASE = {
"feature_size": 1,
"padding_value": 0.0,
"sampling_rate": 1_6_0_0_0,
"return_attention_mask": False,
"do_normalize": True,
}
__SCREAMING_SNAKE_CASE = tempfile.mkdtemp()
__SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
__SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname , UpperCAmelCase__ )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(UpperCAmelCase__ ) + "\n" )
with open(self.feature_extraction_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(UpperCAmelCase__ ) + "\n" )
# load decoder from hub
__SCREAMING_SNAKE_CASE = "hf-internal-testing/ngram-beam-search-decoder"
def UpperCAmelCase_ ( self : Any , **UpperCAmelCase__ : Dict ) -> Any:
__SCREAMING_SNAKE_CASE = self.add_kwargs_tokens_map.copy()
kwargs.update(UpperCAmelCase__ )
return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname , **UpperCAmelCase__ )
def UpperCAmelCase_ ( self : List[str] , **UpperCAmelCase__ : Optional[Any] ) -> Dict:
return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname , **UpperCAmelCase__ )
def UpperCAmelCase_ ( self : Optional[Any] , **UpperCAmelCase__ : int ) -> str:
return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name , **UpperCAmelCase__ )
def UpperCAmelCase_ ( self : Optional[Any] ) -> Any:
shutil.rmtree(self.tmpdirname )
def UpperCAmelCase_ ( self : Tuple ) -> Dict:
__SCREAMING_SNAKE_CASE = self.get_tokenizer()
__SCREAMING_SNAKE_CASE = self.get_feature_extractor()
__SCREAMING_SNAKE_CASE = self.get_decoder()
__SCREAMING_SNAKE_CASE = WavaVecaProcessorWithLM(tokenizer=UpperCAmelCase__ , feature_extractor=UpperCAmelCase__ , decoder=UpperCAmelCase__ )
processor.save_pretrained(self.tmpdirname )
__SCREAMING_SNAKE_CASE = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname )
# tokenizer
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , UpperCAmelCase__ )
# feature extractor
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , UpperCAmelCase__ )
# decoder
self.assertEqual(processor.decoder._alphabet.labels , decoder._alphabet.labels )
self.assertEqual(
processor.decoder.model_container[decoder._model_key]._unigram_set , decoder.model_container[decoder._model_key]._unigram_set , )
self.assertIsInstance(processor.decoder , UpperCAmelCase__ )
def UpperCAmelCase_ ( self : Optional[Any] ) -> Any:
__SCREAMING_SNAKE_CASE = WavaVecaProcessorWithLM(
tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
processor.save_pretrained(self.tmpdirname )
# make sure that error is thrown when decoder alphabet doesn't match
__SCREAMING_SNAKE_CASE = WavaVecaProcessorWithLM.from_pretrained(
self.tmpdirname , alpha=5.0 , beta=3.0 , score_boundary=-7.0 , unk_score_offset=3 )
# decoder
self.assertEqual(processor.language_model.alpha , 5.0 )
self.assertEqual(processor.language_model.beta , 3.0 )
self.assertEqual(processor.language_model.score_boundary , -7.0 )
self.assertEqual(processor.language_model.unk_score_offset , 3 )
def UpperCAmelCase_ ( self : Tuple ) -> Optional[Any]:
__SCREAMING_SNAKE_CASE = self.get_tokenizer()
# add token to trigger raise
tokenizer.add_tokens(["xx"] )
with self.assertRaisesRegex(UpperCAmelCase__ , "include" ):
WavaVecaProcessorWithLM(
tokenizer=UpperCAmelCase__ , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
def UpperCAmelCase_ ( self : Union[str, Any] ) -> Optional[Any]:
__SCREAMING_SNAKE_CASE = self.get_feature_extractor()
__SCREAMING_SNAKE_CASE = self.get_tokenizer()
__SCREAMING_SNAKE_CASE = self.get_decoder()
__SCREAMING_SNAKE_CASE = WavaVecaProcessorWithLM(tokenizer=UpperCAmelCase__ , feature_extractor=UpperCAmelCase__ , decoder=UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = floats_list((3, 1_0_0_0) )
__SCREAMING_SNAKE_CASE = feature_extractor(UpperCAmelCase__ , return_tensors="np" )
__SCREAMING_SNAKE_CASE = processor(UpperCAmelCase__ , return_tensors="np" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def UpperCAmelCase_ ( self : List[str] ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE = self.get_feature_extractor()
__SCREAMING_SNAKE_CASE = self.get_tokenizer()
__SCREAMING_SNAKE_CASE = self.get_decoder()
__SCREAMING_SNAKE_CASE = WavaVecaProcessorWithLM(tokenizer=UpperCAmelCase__ , feature_extractor=UpperCAmelCase__ , decoder=UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = "This is a test string"
__SCREAMING_SNAKE_CASE = processor(text=UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = tokenizer(UpperCAmelCase__ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def UpperCAmelCase_ ( self : Optional[int] , UpperCAmelCase__ : Dict=(2, 1_0, 1_6) , UpperCAmelCase__ : str=7_7 ) -> List[Any]:
np.random.seed(UpperCAmelCase__ )
return np.random.rand(*UpperCAmelCase__ )
def UpperCAmelCase_ ( self : Optional[Any] ) -> Optional[Any]:
__SCREAMING_SNAKE_CASE = self.get_feature_extractor()
__SCREAMING_SNAKE_CASE = self.get_tokenizer()
__SCREAMING_SNAKE_CASE = self.get_decoder()
__SCREAMING_SNAKE_CASE = WavaVecaProcessorWithLM(tokenizer=UpperCAmelCase__ , feature_extractor=UpperCAmelCase__ , decoder=UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = self._get_dummy_logits(shape=(1_0, 1_6) , seed=1_3 )
__SCREAMING_SNAKE_CASE = processor.decode(UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = decoder.decode_beams(UpperCAmelCase__ )[0]
self.assertEqual(decoded_decoder[0] , decoded_processor.text )
self.assertEqual("</s> <s> </s>" , decoded_processor.text )
self.assertEqual(decoded_decoder[-2] , decoded_processor.logit_score )
self.assertEqual(decoded_decoder[-1] , decoded_processor.lm_score )
@parameterized.expand([[None], ["fork"], ["spawn"]] )
def UpperCAmelCase_ ( self : Optional[Any] , UpperCAmelCase__ : int ) -> List[str]:
__SCREAMING_SNAKE_CASE = self.get_feature_extractor()
__SCREAMING_SNAKE_CASE = self.get_tokenizer()
__SCREAMING_SNAKE_CASE = self.get_decoder()
__SCREAMING_SNAKE_CASE = WavaVecaProcessorWithLM(tokenizer=UpperCAmelCase__ , feature_extractor=UpperCAmelCase__ , decoder=UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = self._get_dummy_logits()
# note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
# otherwise, the LM won't be available to the pool's sub-processes.
# manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
if pool_context is None:
__SCREAMING_SNAKE_CASE = processor.batch_decode(UpperCAmelCase__ )
else:
with get_context(UpperCAmelCase__ ).Pool() as pool:
__SCREAMING_SNAKE_CASE = processor.batch_decode(UpperCAmelCase__ , UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = list(UpperCAmelCase__ )
with get_context("fork" ).Pool() as p:
__SCREAMING_SNAKE_CASE = decoder.decode_beams_batch(UpperCAmelCase__ , UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = [], [], []
for beams in decoded_beams:
texts_decoder.append(beams[0][0] )
logit_scores_decoder.append(beams[0][-2] )
lm_scores_decoder.append(beams[0][-1] )
self.assertListEqual(UpperCAmelCase__ , decoded_processor.text )
self.assertListEqual(["<s> <s> </s>", "<s> <s> <s>"] , decoded_processor.text )
self.assertListEqual(UpperCAmelCase__ , decoded_processor.logit_score )
self.assertListEqual(UpperCAmelCase__ , decoded_processor.lm_score )
def UpperCAmelCase_ ( self : Optional[int] ) -> int:
__SCREAMING_SNAKE_CASE = self.get_feature_extractor()
__SCREAMING_SNAKE_CASE = self.get_tokenizer()
__SCREAMING_SNAKE_CASE = self.get_decoder()
__SCREAMING_SNAKE_CASE = WavaVecaProcessorWithLM(tokenizer=UpperCAmelCase__ , feature_extractor=UpperCAmelCase__ , decoder=UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = self._get_dummy_logits()
__SCREAMING_SNAKE_CASE = 1_5
__SCREAMING_SNAKE_CASE = -20.0
__SCREAMING_SNAKE_CASE = -4.0
__SCREAMING_SNAKE_CASE = processor.batch_decode(
UpperCAmelCase__ , beam_width=UpperCAmelCase__ , beam_prune_logp=UpperCAmelCase__ , token_min_logp=UpperCAmelCase__ , )
__SCREAMING_SNAKE_CASE = decoded_processor_out.text
__SCREAMING_SNAKE_CASE = list(UpperCAmelCase__ )
with get_context("fork" ).Pool() as pool:
__SCREAMING_SNAKE_CASE = decoder.decode_beams_batch(
UpperCAmelCase__ , UpperCAmelCase__ , beam_width=UpperCAmelCase__ , beam_prune_logp=UpperCAmelCase__ , token_min_logp=UpperCAmelCase__ , )
__SCREAMING_SNAKE_CASE = [d[0][0] for d in decoded_decoder_out]
__SCREAMING_SNAKE_CASE = [d[0][2] for d in decoded_decoder_out]
__SCREAMING_SNAKE_CASE = [d[0][3] for d in decoded_decoder_out]
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
self.assertListEqual(["</s> <s> <s>", "<s> <s> <s>"] , UpperCAmelCase__ )
self.assertTrue(np.array_equal(UpperCAmelCase__ , decoded_processor_out.logit_score ) )
self.assertTrue(np.allclose([-20.054, -18.447] , UpperCAmelCase__ , atol=1E-3 ) )
self.assertTrue(np.array_equal(UpperCAmelCase__ , decoded_processor_out.lm_score ) )
self.assertTrue(np.allclose([-15.554, -13.9_474] , UpperCAmelCase__ , atol=1E-3 ) )
def UpperCAmelCase_ ( self : str ) -> Dict:
__SCREAMING_SNAKE_CASE = self.get_feature_extractor()
__SCREAMING_SNAKE_CASE = self.get_tokenizer()
__SCREAMING_SNAKE_CASE = self.get_decoder()
__SCREAMING_SNAKE_CASE = WavaVecaProcessorWithLM(tokenizer=UpperCAmelCase__ , feature_extractor=UpperCAmelCase__ , decoder=UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = self._get_dummy_logits()
__SCREAMING_SNAKE_CASE = 2.0
__SCREAMING_SNAKE_CASE = 5.0
__SCREAMING_SNAKE_CASE = -20.0
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = processor.batch_decode(
UpperCAmelCase__ , alpha=UpperCAmelCase__ , beta=UpperCAmelCase__ , unk_score_offset=UpperCAmelCase__ , lm_score_boundary=UpperCAmelCase__ , )
__SCREAMING_SNAKE_CASE = decoded_processor_out.text
__SCREAMING_SNAKE_CASE = list(UpperCAmelCase__ )
decoder.reset_params(
alpha=UpperCAmelCase__ , beta=UpperCAmelCase__ , unk_score_offset=UpperCAmelCase__ , lm_score_boundary=UpperCAmelCase__ , )
with get_context("fork" ).Pool() as pool:
__SCREAMING_SNAKE_CASE = decoder.decode_beams_batch(
UpperCAmelCase__ , UpperCAmelCase__ , )
__SCREAMING_SNAKE_CASE = [d[0][0] for d in decoded_decoder_out]
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
self.assertListEqual(["<s> </s> <s> </s> </s>", "</s> </s> <s> </s> </s>"] , UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = processor.decoder.model_container[processor.decoder._model_key]
self.assertEqual(lm_model.alpha , 2.0 )
self.assertEqual(lm_model.beta , 5.0 )
self.assertEqual(lm_model.unk_score_offset , -20.0 )
self.assertEqual(lm_model.score_boundary , UpperCAmelCase__ )
def UpperCAmelCase_ ( self : List[str] ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE = WavaVecaProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm" )
__SCREAMING_SNAKE_CASE = processor.decoder.model_container[processor.decoder._model_key]
__SCREAMING_SNAKE_CASE = Path(language_model._kenlm_model.path.decode("utf-8" ) ).parent.parent.absolute()
__SCREAMING_SNAKE_CASE = os.listdir(UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = ["alphabet.json", "language_model"]
downloaded_decoder_files.sort()
expected_decoder_files.sort()
# test that only decoder relevant files from
# https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
# are downloaded and none of the rest (e.g. README.md, ...)
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
def UpperCAmelCase_ ( self : List[str] ) -> List[str]:
__SCREAMING_SNAKE_CASE = snapshot_download("hf-internal-testing/processor_with_lm" )
__SCREAMING_SNAKE_CASE = WavaVecaProcessorWithLM.from_pretrained(UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = processor.decoder.model_container[processor.decoder._model_key]
__SCREAMING_SNAKE_CASE = Path(language_model._kenlm_model.path.decode("utf-8" ) ).parent.parent.absolute()
__SCREAMING_SNAKE_CASE = os.listdir(UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = os.listdir(UpperCAmelCase__ )
local_decoder_files.sort()
expected_decoder_files.sort()
# test that both decoder form hub and local files in cache are the same
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
def UpperCAmelCase_ ( self : Tuple ) -> Tuple:
__SCREAMING_SNAKE_CASE = WavaVecaProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm" )
__SCREAMING_SNAKE_CASE = AutoProcessor.from_pretrained("hf-internal-testing/processor_with_lm" )
__SCREAMING_SNAKE_CASE = floats_list((3, 1_0_0_0) )
__SCREAMING_SNAKE_CASE = processor_wavaveca(UpperCAmelCase__ , return_tensors="np" )
__SCREAMING_SNAKE_CASE = processor_auto(UpperCAmelCase__ , return_tensors="np" )
for key in input_wavaveca.keys():
self.assertAlmostEqual(input_wavaveca[key].sum() , input_auto[key].sum() , delta=1E-2 )
__SCREAMING_SNAKE_CASE = self._get_dummy_logits()
__SCREAMING_SNAKE_CASE = processor_wavaveca.batch_decode(UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = processor_auto.batch_decode(UpperCAmelCase__ )
self.assertListEqual(decoded_wavaveca.text , decoded_auto.text )
def UpperCAmelCase_ ( self : Dict ) -> str:
__SCREAMING_SNAKE_CASE = self.get_feature_extractor()
__SCREAMING_SNAKE_CASE = self.get_tokenizer()
__SCREAMING_SNAKE_CASE = self.get_decoder()
__SCREAMING_SNAKE_CASE = WavaVecaProcessorWithLM(tokenizer=UpperCAmelCase__ , feature_extractor=UpperCAmelCase__ , decoder=UpperCAmelCase__ )
self.assertListEqual(
processor.model_input_names , feature_extractor.model_input_names , msg="`processor` and `feature_extractor` model input names do not match" , )
@staticmethod
def UpperCAmelCase_ ( UpperCAmelCase__ : Any , UpperCAmelCase__ : Any ) -> Tuple:
__SCREAMING_SNAKE_CASE = [d[key] for d in offsets]
return retrieved_list
def UpperCAmelCase_ ( self : List[str] ) -> List[str]:
__SCREAMING_SNAKE_CASE = WavaVecaProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm" )
__SCREAMING_SNAKE_CASE = self._get_dummy_logits()[0]
__SCREAMING_SNAKE_CASE = processor.decode(UpperCAmelCase__ , output_word_offsets=UpperCAmelCase__ )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue("text" in outputs )
self.assertTrue("word_offsets" in outputs )
self.assertTrue(isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) )
self.assertEqual(" ".join(self.get_from_offsets(outputs["word_offsets"] , "word" ) ) , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs["word_offsets"] , "word" ) , ["<s>", "<s>", "</s>"] )
self.assertListEqual(self.get_from_offsets(outputs["word_offsets"] , "start_offset" ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs["word_offsets"] , "end_offset" ) , [1, 3, 5] )
def UpperCAmelCase_ ( self : Dict ) -> List[Any]:
__SCREAMING_SNAKE_CASE = WavaVecaProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm" )
__SCREAMING_SNAKE_CASE = self._get_dummy_logits()
__SCREAMING_SNAKE_CASE = processor.batch_decode(UpperCAmelCase__ , output_word_offsets=UpperCAmelCase__ )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue("text" in outputs )
self.assertTrue("word_offsets" in outputs )
self.assertTrue(isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) )
self.assertListEqual(
[" ".join(self.get_from_offsets(UpperCAmelCase__ , "word" ) ) for o in outputs["word_offsets"]] , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0] , "word" ) , ["<s>", "<s>", "</s>"] )
self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0] , "start_offset" ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0] , "end_offset" ) , [1, 3, 5] )
@slow
@require_torch
@require_torchaudio
def UpperCAmelCase_ ( self : List[str] ) -> Optional[int]:
import torch
__SCREAMING_SNAKE_CASE = load_dataset("common_voice" , "en" , split="train" , streaming=UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = ds.cast_column("audio" , datasets.Audio(sampling_rate=1_6_0_0_0 ) )
__SCREAMING_SNAKE_CASE = iter(UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = next(UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = AutoProcessor.from_pretrained("patrickvonplaten/wav2vec2-base-100h-with-lm" )
__SCREAMING_SNAKE_CASE = WavaVecaForCTC.from_pretrained("patrickvonplaten/wav2vec2-base-100h-with-lm" )
# compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
__SCREAMING_SNAKE_CASE = processor(sample["audio"]["array"] , return_tensors="pt" ).input_values
with torch.no_grad():
__SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ ).logits.cpu().numpy()
__SCREAMING_SNAKE_CASE = processor.decode(logits[0] , output_word_offsets=UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
__SCREAMING_SNAKE_CASE = [
{
"start_time": d["start_offset"] * time_offset,
"end_time": d["end_offset"] * time_offset,
"word": d["word"],
}
for d in output["word_offsets"]
]
__SCREAMING_SNAKE_CASE = "WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL"
# output words
self.assertEqual(" ".join(self.get_from_offsets(UpperCAmelCase__ , "word" ) ) , UpperCAmelCase__ )
self.assertEqual(" ".join(self.get_from_offsets(UpperCAmelCase__ , "word" ) ) , output.text )
# output times
__SCREAMING_SNAKE_CASE = torch.tensor(self.get_from_offsets(UpperCAmelCase__ , "start_time" ) )
__SCREAMING_SNAKE_CASE = torch.tensor(self.get_from_offsets(UpperCAmelCase__ , "end_time" ) )
# fmt: off
__SCREAMING_SNAKE_CASE = torch.tensor([1.4_199, 1.6_599, 2.2_599, 3.0, 3.24, 3.5_999, 3.7_999, 4.0_999, 4.26, 4.94, 5.28, 5.6_599, 5.78, 5.94, 6.32, 6.5_399, 6.6_599] )
__SCREAMING_SNAKE_CASE = torch.tensor([1.5_399, 1.8_999, 2.9, 3.16, 3.5_399, 3.72, 4.0_199, 4.1_799, 4.76, 5.1_599, 5.5_599, 5.6_999, 5.86, 6.1_999, 6.38, 6.6_199, 6.94] )
# fmt: on
self.assertTrue(torch.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=0.01 ) )
self.assertTrue(torch.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=0.01 ) )
| 195
|
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline
else:
from .pipeline_kandinsky import KandinskyPipeline
    from .pipeline_kandinsky_img2img import KandinskyImg2ImgPipeline
from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput
from .text_encoder import MultilingualCLIP
| 195
| 1
|
from argparse import ArgumentParser, Namespace
from typing import Any, List, Optional
from ..pipelines import Pipeline, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from fastapi import Body, FastAPI, HTTPException
from fastapi.routing import APIRoute
from pydantic import BaseModel
from starlette.responses import JSONResponse
from uvicorn import run
_snake_case : Tuple = True
except (ImportError, AttributeError):
_snake_case : Any = object
def a_ ( *lowerCAmelCase_ : Optional[Any], **lowerCAmelCase_ : int ):
pass
_snake_case : Any = False
_snake_case : Optional[int] = logging.get_logger('transformers-cli/serving')
def a_ ( lowerCAmelCase_ : Namespace ):
__lowerCAmelCase = pipeline(
task=args.task, model=args.model if args.model else None, config=args.config, tokenizer=args.tokenizer, device=args.device, )
return ServeCommand(lowerCAmelCase_, args.host, args.port, args.workers )
class _UpperCAmelCase ( _UpperCamelCase ):
"""simple docstring"""
a_ = 42
class _UpperCAmelCase ( _UpperCamelCase ):
"""simple docstring"""
a_ = 42
a_ = 42
class _UpperCAmelCase ( _UpperCamelCase ):
"""simple docstring"""
a_ = 42
class _UpperCAmelCase ( _UpperCamelCase ):
"""simple docstring"""
a_ = 42
class _UpperCAmelCase ( _UpperCamelCase ):
"""simple docstring"""
@staticmethod
def lowercase ( lowerCAmelCase_ : ArgumentParser ) -> Tuple:
__lowerCAmelCase = parser.add_parser(
'serve' , help='CLI tool to run inference requests through REST and GraphQL endpoints.' )
serve_parser.add_argument(
'--task' , type=lowerCAmelCase_ , choices=get_supported_tasks() , help='The task to run the pipeline on' , )
serve_parser.add_argument('--host' , type=lowerCAmelCase_ , default='localhost' , help='Interface the server will listen on.' )
serve_parser.add_argument('--port' , type=lowerCAmelCase_ , default=8_8_8_8 , help='Port the serving will listen to.' )
serve_parser.add_argument('--workers' , type=lowerCAmelCase_ , default=1 , help='Number of http workers' )
serve_parser.add_argument('--model' , type=lowerCAmelCase_ , help='Model\'s name or path to stored model.' )
serve_parser.add_argument('--config' , type=lowerCAmelCase_ , help='Model\'s config name or path to stored model.' )
serve_parser.add_argument('--tokenizer' , type=lowerCAmelCase_ , help='Tokenizer name to use.' )
serve_parser.add_argument(
'--device' , type=lowerCAmelCase_ , default=-1 , help='Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)' , )
serve_parser.set_defaults(func=lowerCAmelCase_ )
def __init__( self : Optional[int] , lowerCAmelCase_ : Pipeline , lowerCAmelCase_ : str , lowerCAmelCase_ : int , lowerCAmelCase_ : int ) -> Optional[int]:
__lowerCAmelCase = pipeline
__lowerCAmelCase = host
__lowerCAmelCase = port
__lowerCAmelCase = workers
if not _serve_dependencies_installed:
raise RuntimeError(
'Using serve command requires FastAPI and uvicorn. '
'Please install transformers with [serving]: pip install "transformers[serving]".'
'Or install FastAPI and uvicorn separately.' )
else:
logger.info(f"""Serving model over {host}:{port}""" )
__lowerCAmelCase = FastAPI(
routes=[
APIRoute(
'/' , self.model_info , response_model=lowerCAmelCase_ , response_class=lowerCAmelCase_ , methods=['GET'] , ),
APIRoute(
'/tokenize' , self.tokenize , response_model=lowerCAmelCase_ , response_class=lowerCAmelCase_ , methods=['POST'] , ),
APIRoute(
'/detokenize' , self.detokenize , response_model=lowerCAmelCase_ , response_class=lowerCAmelCase_ , methods=['POST'] , ),
APIRoute(
'/forward' , self.forward , response_model=lowerCAmelCase_ , response_class=lowerCAmelCase_ , methods=['POST'] , ),
] , timeout=6_0_0 , )
def lowercase ( self : Optional[Any] ) -> Optional[Any]:
run(self._app , host=self.host , port=self.port , workers=self.workers )
def lowercase ( self : int ) -> List[str]:
return ServeModelInfoResult(infos=vars(self._pipeline.model.config ) )
def lowercase ( self : Union[str, Any] , lowerCAmelCase_ : str = Body(lowerCAmelCase_ , embed=lowerCAmelCase_ ) , lowerCAmelCase_ : bool = Body(lowerCAmelCase_ , embed=lowerCAmelCase_ ) ) -> Any:
try:
__lowerCAmelCase = self._pipeline.tokenizer.tokenize(lowerCAmelCase_ )
if return_ids:
__lowerCAmelCase = self._pipeline.tokenizer.convert_tokens_to_ids(lowerCAmelCase_ )
return ServeTokenizeResult(tokens=lowerCAmelCase_ , tokens_ids=lowerCAmelCase_ )
else:
return ServeTokenizeResult(tokens=lowerCAmelCase_ )
except Exception as e:
raise HTTPException(status_code=5_0_0 , detail={'model': '', 'error': str(lowerCAmelCase_ )} )
def lowercase ( self : Optional[Any] , lowerCAmelCase_ : List[int] = Body(lowerCAmelCase_ , embed=lowerCAmelCase_ ) , lowerCAmelCase_ : bool = Body(lowerCAmelCase_ , embed=lowerCAmelCase_ ) , lowerCAmelCase_ : bool = Body(lowerCAmelCase_ , embed=lowerCAmelCase_ ) , ) -> Optional[Any]:
try:
__lowerCAmelCase = self._pipeline.tokenizer.decode(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
return ServeDeTokenizeResult(model='' , text=lowerCAmelCase_ )
except Exception as e:
raise HTTPException(status_code=5_0_0 , detail={'model': '', 'error': str(lowerCAmelCase_ )} )
async def lowercase ( self : Tuple , lowerCAmelCase_ : str=Body(lowerCAmelCase_ , embed=lowerCAmelCase_ ) ) -> Optional[int]:
# Check we don't have empty string
if len(lowerCAmelCase_ ) == 0:
return ServeForwardResult(output=[] , attention=[] )
try:
# Forward through the model
__lowerCAmelCase = self._pipeline(lowerCAmelCase_ )
return ServeForwardResult(output=lowerCAmelCase_ )
except Exception as e:
raise HTTPException(5_0_0 , {'error': str(lowerCAmelCase_ )} )
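# CLI sketch for the serving command above (canonically `transformers-cli
# serve`; the flags match the parser registered by the static method earlier in
# this class, and the model id is illustrative):
#   transformers-cli serve --task text-classification \
#       --model distilbert-base-uncased-finetuned-sst-2-english \
#       --host 0.0.0.0 --port 8888 --workers 1
# The server then exposes GET / plus POST /tokenize, /detokenize and /forward.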
| 284
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_snake_case : Union[str, Any] = {
'configuration_layoutlmv3': [
'LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP',
'LayoutLMv3Config',
'LayoutLMv3OnnxConfig',
],
'processing_layoutlmv3': ['LayoutLMv3Processor'],
'tokenization_layoutlmv3': ['LayoutLMv3Tokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case : Union[str, Any] = ['LayoutLMv3TokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case : List[str] = [
'LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST',
'LayoutLMv3ForQuestionAnswering',
'LayoutLMv3ForSequenceClassification',
'LayoutLMv3ForTokenClassification',
'LayoutLMv3Model',
'LayoutLMv3PreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case : Optional[Any] = [
'TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFLayoutLMv3ForQuestionAnswering',
'TFLayoutLMv3ForSequenceClassification',
'TFLayoutLMv3ForTokenClassification',
'TFLayoutLMv3Model',
'TFLayoutLMv3PreTrainedModel',
]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case : Tuple = ['LayoutLMv3FeatureExtractor']
_snake_case : str = ['LayoutLMv3ImageProcessor']
if TYPE_CHECKING:
    from .configuration_layoutlmv3 import (
        LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP,
        LayoutLMv3Config,
        LayoutLMv3OnnxConfig,
    )
    from .processing_layoutlmv3 import LayoutLMv3Processor
    from .tokenization_layoutlmv3 import LayoutLMv3Tokenizer
    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutlmv3_fast import LayoutLMv3TokenizerFast
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_layoutlmv3 import (
            LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMv3ForQuestionAnswering,
            LayoutLMv3ForSequenceClassification,
            LayoutLMv3ForTokenClassification,
            LayoutLMv3Model,
            LayoutLMv3PreTrainedModel,
        )
    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_layoutlmv3 import (
            TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFLayoutLMv3ForQuestionAnswering,
            TFLayoutLMv3ForSequenceClassification,
            TFLayoutLMv3ForTokenClassification,
            TFLayoutLMv3Model,
            TFLayoutLMv3PreTrainedModel,
        )
    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_layoutlmv3 import LayoutLMv3FeatureExtractor
        from .image_processing_layoutlmv3 import LayoutLMv3ImageProcessor
else:
import sys
_snake_case : Any = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 284
| 1
|
def catalan( number ):
    if not isinstance(number , int ):
        msg = f'''Input value of [number={number}] must be an integer'''
        raise TypeError(msg )
    if number < 1:
        msg = f'''Input value of [number={number}] must be > 0'''
        raise ValueError(msg )
    current_number = 1
    for i in range(1 , number ):
        current_number *= 4 * i - 2
        current_number //= i + 1
    return current_number
if __name__ == "__main__":
import doctest
doctest.testmod()
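# Worked values for catalan() above, which iterates
# C <- C * (4*i - 2) // (i + 1) for i = 1..number-1 (so catalan(n) is the
# (n-1)-th Catalan number in the usual 0-indexed convention):
#   catalan(1) -> 1, catalan(2) -> 1, catalan(3) -> 2, catalan(4) -> 5, catalan(5) -> 14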
| 358
|
import inspect
import unittest
from transformers import ConvNextConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel
from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class ConvNextModelTester :
    def __init__( self , parent , batch_size=13 , image_size=32 , num_channels=3 , num_stages=4 , hidden_sizes=[10, 20, 30, 40] , depths=[2, 2, 3, 2] , is_training=True , use_labels=True , intermediate_size=37 , hidden_act="gelu" , num_labels=10 , initializer_range=0.02 , out_features=["stage2", "stage3", "stage4"] , out_indices=[2, 3, 4] , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.out_indices = out_indices
        self.scope = scope
    def prepare_config_and_inputs( self ):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.num_labels )
        config = self.get_config()
        return config, pixel_values, labels
    def get_config( self ):
        return ConvNextConfig(
            num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=False , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
    def create_and_check_model( self , config , pixel_values , labels ):
        model = ConvNextModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
    def create_and_check_for_image_classification( self , config , pixel_values , labels ):
        model = ConvNextForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def create_and_check_backbone( self , config , pixel_values , labels ):
        model = ConvNextBackbone(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        # verify hidden states
        self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
        # verify channels
        self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
        self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
        # verify backbone works with out_features=None
        config.out_features = None
        model = ConvNextBackbone(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps ) , 1 )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
        # verify channels
        self.parent.assertEqual(len(model.channels ) , 1 )
        self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_torch
class lowercase ( ModelTesterMixin,PipelineTesterMixin,unittest.TestCase ):
_a = (
(
ConvNextModel,
ConvNextForImageClassification,
ConvNextBackbone,
)
if is_torch_available()
else ()
)
_a = (
{"feature-extraction": ConvNextModel, "image-classification": ConvNextForImageClassification}
if is_torch_available()
else {}
)
_a = True
_a = False
_a = False
_a = False
_a = False
def a__ ( self ) -> Dict:
_A : int = ConvNextModelTester(self )
_A : List[Any] = ConfigTester(self , config_class=_a , has_text_modality=_a , hidden_size=37 )
def a__ ( self ) -> Any:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def a__ ( self ) -> str:
return
@unittest.skip(reason="""ConvNext does not use inputs_embeds""" )
def a__ ( self ) -> Tuple:
pass
@unittest.skip(reason="""ConvNext does not support input and output embeddings""" )
def a__ ( self ) -> Optional[Any]:
pass
@unittest.skip(reason="""ConvNext does not use feedforward chunking""" )
def a__ ( self ) -> List[Any]:
pass
def a__ ( self ) -> Optional[Any]:
_A , _A : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_A : Optional[Any] = model_class(_a )
_A : List[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_A : List[Any] = [*signature.parameters.keys()]
_A : int = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , _a )
def a__ ( self ) -> Union[str, Any]:
_A : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def a__ ( self ) -> Tuple:
_A : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*_a )
def a__ ( self ) -> Tuple:
def check_hidden_states_output(_a , _a , _a ):
_A : Tuple = model_class(_a )
model.to(_a )
model.eval()
with torch.no_grad():
_A : Dict = model(**self._prepare_for_class(_a , _a ) )
_A : Optional[Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_A : Dict = self.model_tester.num_stages
self.assertEqual(len(_a ) , expected_num_stages + 1 )
# ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
_A , _A : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_A : List[Any] = True
check_hidden_states_output(_a , _a , _a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_A : Union[str, Any] = True
check_hidden_states_output(_a , _a , _a )
def a__ ( self ) -> int:
_A : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_a )
@slow
def a__ ( self ) -> Optional[int]:
for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_A : Optional[Any] = ConvNextModel.from_pretrained(_a )
self.assertIsNotNone(_a )
def lowerCAmelCase_ ( ):
_A : Optional[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class lowercase ( unittest.TestCase ):
@cached_property
def a__ ( self ) -> str:
return AutoImageProcessor.from_pretrained("""facebook/convnext-tiny-224""" ) if is_vision_available() else None
@slow
def a__ ( self ) -> Optional[Any]:
_A : Any = ConvNextForImageClassification.from_pretrained("""facebook/convnext-tiny-224""" ).to(_a )
_A : List[str] = self.default_image_processor
_A : int = prepare_img()
_A : Union[str, Any] = image_processor(images=_a , return_tensors="""pt""" ).to(_a )
# forward pass
with torch.no_grad():
_A : Dict = model(**_a )
# verify the logits
_A : Optional[Any] = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , _a )
_A : Any = torch.tensor([-0.0260, -0.4739, 0.1911] ).to(_a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _a , atol=1e-4 ) )
@require_torch
class lowercase ( unittest.TestCase,BackboneTesterMixin ):
    all_model_classes = (ConvNextBackbone,) if is_torch_available() else ()
    config_class = ConvNextConfig
    has_attentions = False
    def setUp( self ):
        self.model_tester = ConvNextModelTester(self )
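# Note (illustrative, not in the original file): the @slow integration test above is
# skipped by default and only runs when the RUN_SLOW environment variable is set, e.g.
#   RUN_SLOW=1 python -m pytest tests/models/convnext/test_modeling_convnext.py
# (the test-file path is an assumption based on the transformers repo layout).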
| 343
| 0
|
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class UpperCAmelCase_ ( ProcessorMixin):
    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'CLIPImageProcessor'
    tokenizer_class = ('XLMRobertaTokenizer', 'XLMRobertaTokenizerFast')
    def __init__( self, image_processor=None, tokenizer=None, **kwargs):
        '''simple docstring'''
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.", FutureWarning, )
            feature_extractor = kwargs.pop("feature_extractor")
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")
        super().__init__(image_processor, tokenizer)
    def __call__( self, text=None, images=None, return_tensors=None, **kwargs):
        '''simple docstring'''
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")
        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)
        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)
    def batch_decode( self, *args, **kwargs):
        '''simple docstring'''
        return self.tokenizer.batch_decode(*args, **kwargs)
    def decode( self, *args, **kwargs):
        '''simple docstring'''
        return self.tokenizer.decode(*args, **kwargs)
    @property
    def model_input_names( self):
        '''simple docstring'''
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
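# Usage sketch (illustrative; pairing a CLIPImageProcessor with an XLM-Roberta
# tokenizer matches AltCLIP-style models, but that mapping is an assumption here):
#   processor = UpperCAmelCase_(image_processor=image_processor, tokenizer=tokenizer)
#   inputs = processor(text=["a photo of a cat"], images=[pil_image], return_tensors="pt")
#   # -> input_ids / attention_mask from the tokenizer plus pixel_values from the image processor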
| 36
|
import argparse
from collections import defaultdict
import yaml
_snake_case = "docs/source/en/_toctree.yml"
def A ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Dict = defaultdict(_lowerCamelCase )
_lowerCAmelCase : Any = []
_lowerCAmelCase : List[str] = []
for doc in doc_list:
if "local" in doc:
counts[doc["local"]] += 1
if doc["title"].lower() == "overview":
overview_doc.append({"local": doc["local"], "title": doc["title"]} )
else:
new_doc_list.append(_lowerCamelCase )
_lowerCAmelCase : Optional[Any] = new_doc_list
_lowerCAmelCase : List[Any] = [key for key, value in counts.items() if value > 1]
_lowerCAmelCase : str = []
for duplicate_key in duplicates:
_lowerCAmelCase : List[str] = list({doc["title"] for doc in doc_list if doc["local"] == duplicate_key} )
if len(_lowerCamelCase ) > 1:
raise ValueError(
F"{duplicate_key} is present several times in the documentation table of content at "
"`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
"others." )
# Only add this once
new_doc.append({"local": duplicate_key, "title": titles[0]} )
# Add none duplicate-keys
new_doc.extend([doc for doc in doc_list if "local" not in counts or counts[doc["local"]] == 1] )
_lowerCAmelCase : Optional[Any] = sorted(_lowerCamelCase , key=lambda _lowerCamelCase : s["title"].lower() )
# "overview" gets special treatment and is always first
if len(_lowerCamelCase ) > 1:
raise ValueError("{doc_list} has two 'overview' docs which is not allowed." )
overview_doc.extend(_lowerCamelCase )
# Sort
return overview_doc
def check_scheduler_doc( overwrite=False ):
    '''simple docstring'''
    with open(PATH_TO_TOC , encoding="utf-8" ) as f:
        content = yaml.safe_load(f.read() )
    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]
    # Then to the scheduler doc
    scheduler_idx = 0
    while api_doc[scheduler_idx]["title"] != "Schedulers":
        scheduler_idx += 1
    scheduler_doc = api_doc[scheduler_idx]["sections"]
    new_scheduler_doc = clean_doc_toc(scheduler_doc )
    diff = False
    if new_scheduler_doc != scheduler_doc:
        diff = True
        if overwrite:
            api_doc[scheduler_idx]["sections"] = new_scheduler_doc
    if diff:
        if overwrite:
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC , "w" , encoding="utf-8" ) as f:
                f.write(yaml.dump(content , allow_unicode=True ) )
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this." )
def check_pipeline_doc( overwrite=False ):
    '''simple docstring'''
    with open(PATH_TO_TOC , encoding="utf-8" ) as f:
        content = yaml.safe_load(f.read() )
    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]
    # Then to the pipeline doc
    pipeline_idx = 0
    while api_doc[pipeline_idx]["title"] != "Pipelines":
        pipeline_idx += 1
    diff = False
    pipeline_docs = api_doc[pipeline_idx]["sections"]
    new_pipeline_docs = []
    # sort sub pipeline docs
    for pipeline_doc in pipeline_docs:
        if "section" in pipeline_doc:
            sub_pipeline_doc = pipeline_doc["section"]
            new_sub_pipeline_doc = clean_doc_toc(sub_pipeline_doc )
            if overwrite:
                pipeline_doc["section"] = new_sub_pipeline_doc
        new_pipeline_docs.append(pipeline_doc )
    # sort overall pipeline doc
    new_pipeline_docs = clean_doc_toc(new_pipeline_docs )
    if new_pipeline_docs != pipeline_docs:
        diff = True
        if overwrite:
            api_doc[pipeline_idx]["sections"] = new_pipeline_docs
    if diff:
        if overwrite:
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC , "w" , encoding="utf-8" ) as f:
                f.write(yaml.dump(content , allow_unicode=True ) )
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this." )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
_snake_case = parser.parse_args()
check_scheduler_doc(args.fix_and_overwrite)
check_pipeline_doc(args.fix_and_overwrite)
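# Example invocation (the script path is an assumption; the flag comes from the
# argparse setup above):
#   python utils/check_doc_toc.py                      # raise if the ToC is unsorted
#   python utils/check_doc_toc.py --fix_and_overwrite  # rewrite _toctree.yml in place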
| 36
| 1
|
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class a_ ( ProcessorMixin ):
    attributes = ["""image_processor""", """tokenizer"""]
    image_processor_class = """CLIPImageProcessor"""
    tokenizer_class = ("""CLIPTokenizer""", """CLIPTokenizerFast""")
    def __init__( self , image_processor=None , tokenizer=None , **kwargs ):
        """simple docstring"""
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                """The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
                """ instead.""" , FutureWarning , )
            feature_extractor = kwargs.pop("""feature_extractor""" )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("""You need to specify an `image_processor`.""" )
        if tokenizer is None:
            raise ValueError("""You need to specify a `tokenizer`.""" )
        super().__init__(image_processor , tokenizer )
    def __call__( self , text=None , images=None , return_tensors=None , **kwargs ):
        """simple docstring"""
        if text is None and images is None:
            raise ValueError("""You have to specify either text or images. Both cannot be none.""" )
        if text is not None:
            encoding = self.tokenizer(text , return_tensors=return_tensors , **kwargs )
        if images is not None:
            image_features = self.image_processor(images , return_tensors=return_tensors , **kwargs )
        if text is not None and images is not None:
            encoding["""pixel_values"""] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features ) , tensor_type=return_tensors )
    def batch_decode( self , *args , **kwargs ):
        """simple docstring"""
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode( self , *args , **kwargs ):
        """simple docstring"""
        return self.tokenizer.decode(*args , **kwargs )
    @property
    def model_input_names( self ):
        """simple docstring"""
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
    @property
    def feature_extractor_class( self ):
        """simple docstring"""
        warnings.warn(
            """`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , FutureWarning , )
        return self.image_processor_class
    @property
    def feature_extractor( self ):
        """simple docstring"""
        warnings.warn(
            """`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" , FutureWarning , )
        return self.image_processor
| 370
|
'''simple docstring'''
from PIL import Image
def change_brightness( img , level )-> Image:
    def brightness(c ) -> float:
        # Shifting around the 128 midpoint is equivalent to c + level per channel value.
        return 128 + level + (c - 128)
    if not -255.0 <= level <= 255.0:
        raise ValueError("""level must be between -255.0 (black) and 255.0 (white)""" )
    return img.point(brightness )
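# Worked example (illustrative): with level = 100 a pixel value of 50 maps to 150,
# while 200 maps to 300, which Image.point() clamps back into the valid 0-255 range.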
if __name__ == "__main__":
    # Load image
    with Image.open('image_data/lena.jpg') as img:
        # Change brightness to 100
        brigt_img = change_brightness(img, 1_0_0)
        brigt_img.save('image_data/lena_brightness.png', format='png')
| 183
| 0
|
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
import torch
import torchaudio.compliance.kaldi as ta_kaldi
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
class lowercase ( SequenceFeatureExtractor ):
_SCREAMING_SNAKE_CASE = ['input_features', 'attention_mask']
def __init__( self , lowercase=80 , lowercase=16_000 , lowercase=80 , lowercase=0.0 , lowercase=True , lowercase=True , lowercase=True , **lowercase , ) -> Optional[Any]:
super().__init__(feature_size=lowercase , sampling_rate=lowercase , padding_value=lowercase , **lowercase )
lowerCAmelCase = num_mel_bins
lowerCAmelCase = do_ceptral_normalize
lowerCAmelCase = normalize_means
lowerCAmelCase = normalize_vars
lowerCAmelCase = True
def _snake_case ( self , lowercase , ) -> np.ndarray:
lowerCAmelCase = waveform * (2**15) # Kaldi compliance: 16-bit signed integers
lowerCAmelCase = torch.from_numpy(lowercase ).unsqueeze(0 )
lowerCAmelCase = ta_kaldi.fbank(lowercase , num_mel_bins=self.num_mel_bins , sample_frequency=self.sampling_rate )
return features.numpy()
@staticmethod
def _snake_case ( lowercase , lowercase , lowercase = True , lowercase = True , lowercase = 0.0 , ) -> np.ndarray:
# make sure we normalize float32 arrays
if normalize_means:
lowerCAmelCase = x[:input_length].mean(axis=0 )
lowerCAmelCase = np.subtract(lowercase , lowercase )
if normalize_vars:
lowerCAmelCase = x[:input_length].std(axis=0 )
lowerCAmelCase = np.divide(lowercase , lowercase )
if input_length < x.shape[0]:
lowerCAmelCase = padding_value
# make sure array is in float32
lowerCAmelCase = x.astype(np.floataa )
return x
def _snake_case ( self , lowercase , lowercase = None ) -> List[np.ndarray]:
lowerCAmelCase = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features]
return [
self.utterance_cmvn(lowercase , lowercase , self.normalize_means , self.normalize_vars , self.padding_value )
for x, n in zip(lowercase , lowercase )
]
def __call__( self , lowercase , lowercase = False , lowercase = None , lowercase = False , lowercase = None , lowercase = None , lowercase = None , lowercase = None , **lowercase , ) -> BatchFeature:
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f'The model corresponding to this feature extractor: {self} was trained using a sampling rate of'
f' {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with'
f' {self.sampling_rate} and not {sampling_rate}.' )
else:
logger.warning(
"""It is strongly recommended to pass the `sampling_rate` argument to this function. """
"""Failing to do so can result in silent errors that might be hard to debug.""" )
lowerCAmelCase = isinstance(lowercase , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f'Only mono-channel audio is supported for input to {self}' )
lowerCAmelCase = is_batched_numpy or (
isinstance(lowercase , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
lowerCAmelCase = [np.asarray(lowercase , dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(lowercase , np.ndarray ):
lowerCAmelCase = np.asarray(lowercase , dtype=np.floataa )
elif isinstance(lowercase , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
lowerCAmelCase = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
lowerCAmelCase = [raw_speech]
# extract fbank features
lowerCAmelCase = [self._extract_fbank_features(lowercase ) for waveform in raw_speech]
# convert into correct format for padding
lowerCAmelCase = BatchFeature({"""input_features""": features} )
lowerCAmelCase = self.pad(
lowercase , padding=lowercase , max_length=lowercase , truncation=lowercase , pad_to_multiple_of=lowercase , return_attention_mask=lowercase , **lowercase , )
# make sure list is in array format
lowerCAmelCase = padded_inputs.get("""input_features""" )
if isinstance(input_features[0] , lowercase ):
lowerCAmelCase = [np.asarray(lowercase , dtype=np.floataa ) for feature in input_features]
lowerCAmelCase = padded_inputs.get("""attention_mask""" )
if attention_mask is not None:
lowerCAmelCase = [np.asarray(lowercase , dtype=np.intaa ) for array in attention_mask]
# Utterance-level cepstral mean and variance normalization
if self.do_ceptral_normalize:
lowerCAmelCase = (
np.array(lowercase , dtype=np.intaa )
if self._get_padding_strategies(lowercase , max_length=lowercase ) is not PaddingStrategy.DO_NOT_PAD
else None
)
lowerCAmelCase = self.normalize(
padded_inputs["""input_features"""] , attention_mask=lowercase )
if return_tensors is not None:
lowerCAmelCase = padded_inputs.convert_to_tensors(lowercase )
return padded_inputs
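# Minimal usage sketch (assumptions: a mono 16 kHz waveform as a 1-D float32 numpy
# array, and illustrative variable names):
#   extractor = <this feature extractor class>(num_mel_bins=80, sampling_rate=16_000)
#   feats = extractor(waveform, sampling_rate=16_000, return_tensors="np")
#   feats["input_features"].shape  # (batch, num_frames, 80) after padding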
| 46
|
def ugly_numbers( n ):
    '''simple docstring'''
    ugly_nums = [1]
    ia , ib , ic = 0, 0, 0
    next_a = ugly_nums[ia] * 2
    next_b = ugly_nums[ib] * 3
    next_c = ugly_nums[ic] * 5
    for _ in range(1 , n ):
        next_num = min(next_a , next_b , next_c )
        ugly_nums.append(next_num )
        if next_num == next_a:
            ia += 1
            next_a = ugly_nums[ia] * 2
        if next_num == next_b:
            ib += 1
            next_b = ugly_nums[ib] * 3
        if next_num == next_c:
            ic += 1
            next_c = ugly_nums[ic] * 5
    return ugly_nums[-1]
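# Worked example: the sequence starts 1, 2, 3, 4, 5, 6, 8, 9, 10, 12, ... (numbers
# whose only prime factors are 2, 3 and 5), so ugly_numbers(10) == 12.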
if __name__ == "__main__":
from doctest import testmod
testmod(verbose=True)
print(F"{ugly_numbers(2_00) = }")
| 207
| 0
|
"""simple docstring"""
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
_a : Union[str, Any] = 'true'
def get_basic_setup( accelerator ,num_samples=82 ,batch_size=16 ):
    set_seed(42 )
    model = RegressionModel()
    ddp_model = deepcopy(model )
    dset = RegressionDataset(length=num_samples )
    dataloader = DataLoader(dset ,batch_size=batch_size )
    model.to(accelerator.device )
    ddp_model, dataloader = accelerator.prepare(ddp_model ,dataloader )
    return model, ddp_model, dataloader
def get_dataloader( accelerator ,use_longest=False ):
    tokenizer = AutoTokenizer.from_pretrained("""hf-internal-testing/mrpc-bert-base-cased""" )
    dataset = load_dataset("""glue""" ,"""mrpc""" ,split="""validation""" )
    def tokenize_function(examples ):
        outputs = tokenizer(examples["""sentence1"""] ,examples["""sentence2"""] ,truncation=True ,max_length=None )
        return outputs
    with accelerator.main_process_first():
        tokenized_datasets = dataset.map(
            tokenize_function ,batched=True ,remove_columns=["""idx""", """sentence1""", """sentence2"""] ,)
    tokenized_datasets = tokenized_datasets.rename_column("""label""" ,"""labels""" )
    def collate_fn(examples ):
        if use_longest:
            return tokenizer.pad(examples ,padding="""longest""" ,return_tensors="""pt""" )
        return tokenizer.pad(examples ,padding="""max_length""" ,max_length=128 ,return_tensors="""pt""" )
    return DataLoader(tokenized_datasets ,shuffle=False ,collate_fn=collate_fn ,batch_size=16 )
def get_mrpc_setup( dispatch_batches ,split_batches ):
    accelerator = Accelerator(dispatch_batches=dispatch_batches ,split_batches=split_batches )
    dataloader = get_dataloader(accelerator ,not dispatch_batches )
    model = AutoModelForSequenceClassification.from_pretrained(
        """hf-internal-testing/mrpc-bert-base-cased""" ,return_dict=True )
    ddp_model, ddp_dataloader = accelerator.prepare(model ,dataloader )
    return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def generate_predictions( model ,dataloader ,accelerator ):
    logits_and_targets = []
    for batch in dataloader:
        input, target = batch.values()
        with torch.no_grad():
            logit = model(input )
        logit, target = accelerator.gather_for_metrics((logit, target) )
        logits_and_targets.append((logit, target) )
    logits, targs = [], []
    for logit, targ in logits_and_targets:
        logits.append(logit )
        targs.append(targ )
    logits, targs = torch.cat(logits ), torch.cat(targs )
    return logits, targs
def test_torch_metrics( accelerator ,num_samples=82 ,dispatch_batches=False ,split_batches=False ,batch_size=16 ):
    model, ddp_model, dataloader = get_basic_setup(accelerator ,num_samples ,batch_size )
    logits, _ = generate_predictions(ddp_model ,dataloader ,accelerator )
    assert (
        len(logits ) == num_samples
    ), f"Unexpected number of inputs:\n Expected: {num_samples}\n Actual: {len(logits )}"
def test_mrpc( dispatch_batches = False ,split_batches = False ):
    metric = evaluate.load("""glue""" ,"""mrpc""" )
    setup, accelerator = get_mrpc_setup(dispatch_batches ,split_batches )
    # First do baseline
    model, dataloader, device = setup["""no"""]
    model.to(device )
    model.eval()
    for batch in dataloader:
        batch.to(device )
        with torch.inference_mode():
            outputs = model(**batch )
        preds = outputs.logits.argmax(dim=-1 )
        metric.add_batch(predictions=preds ,references=batch["""labels"""] )
    baseline = metric.compute()
    # Then do distributed
    model, dataloader, device = setup["""ddp"""]
    model.eval()
    for batch in dataloader:
        with torch.inference_mode():
            outputs = model(**batch )
        preds = outputs.logits.argmax(dim=-1 )
        references = batch["""labels"""]
        preds, references = accelerator.gather_for_metrics((preds, references) )
        metric.add_batch(predictions=preds ,references=references )
    distributed = metric.compute()
    for key in "accuracy f1".split():
        assert math.isclose(
            baseline[key] ,distributed[key] ), f"Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n"
def main():
    accelerator = Accelerator(split_batches=False ,dispatch_batches=False )
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_warning()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()
    # These are a bit slower so they should only be ran on the GPU or TPU
    if torch.cuda.is_available() or is_tpu_available():
        if accelerator.is_local_main_process:
            print("""**Testing gather_for_metrics**""" )
        for split_batches in [True, False]:
            for dispatch_batches in [True, False]:
                if accelerator.is_local_main_process:
                    print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`" )
                test_mrpc(dispatch_batches ,split_batches )
                accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("""**Test torch metrics**""" )
    for split_batches in [True, False]:
        for dispatch_batches in [True, False]:
            accelerator = Accelerator(split_batches=split_batches ,dispatch_batches=dispatch_batches )
            if accelerator.is_local_main_process:
                print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99" )
            test_torch_metrics(accelerator ,99 )
            accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("""**Test last batch is not dropped when perfectly divisible**""" )
    accelerator = Accelerator()
    test_torch_metrics(accelerator ,512 )
    accelerator.state._reset_state()
def _mp_fn(index ):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
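# Typical launch (illustrative; the file name is an assumption): this script is meant
# to run under the accelerate launcher so gather_for_metrics has several processes to
# gather from, e.g.
#   accelerate launch --num_processes 2 test_metrics.py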
| 126
|
"""simple docstring"""
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
)
from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class __A :
def __init__( self , a__ , a__=2 , a__=3 , a__=4 , a__=2 , a__=7 , a__=True , a__=True , a__=True , a__=True , a__=99 , a__=36 , a__=3 , a__=4 , a__=37 , a__="gelu" , a__=0.1 , a__=0.1 , a__=512 , a__=16 , a__=2 , a__=0.0_2 , a__=6 , a__=6 , a__=3 , a__=4 , a__=None , a__=1000 , ):
_lowerCAmelCase : Union[str, Any] = parent
_lowerCAmelCase : Optional[Any] = batch_size
_lowerCAmelCase : List[Any] = num_channels
_lowerCAmelCase : str = image_size
_lowerCAmelCase : str = patch_size
_lowerCAmelCase : str = text_seq_length
_lowerCAmelCase : List[Any] = is_training
_lowerCAmelCase : List[str] = use_input_mask
_lowerCAmelCase : List[Any] = use_token_type_ids
_lowerCAmelCase : Optional[Any] = use_labels
_lowerCAmelCase : Any = vocab_size
_lowerCAmelCase : str = hidden_size
_lowerCAmelCase : Dict = num_hidden_layers
_lowerCAmelCase : List[Any] = num_attention_heads
_lowerCAmelCase : str = intermediate_size
_lowerCAmelCase : str = hidden_act
_lowerCAmelCase : Optional[Any] = hidden_dropout_prob
_lowerCAmelCase : Any = attention_probs_dropout_prob
_lowerCAmelCase : Tuple = max_position_embeddings
_lowerCAmelCase : Dict = type_vocab_size
_lowerCAmelCase : Tuple = type_sequence_label_size
_lowerCAmelCase : Union[str, Any] = initializer_range
_lowerCAmelCase : Dict = coordinate_size
_lowerCAmelCase : Optional[int] = shape_size
_lowerCAmelCase : str = num_labels
_lowerCAmelCase : Optional[Any] = num_choices
_lowerCAmelCase : str = scope
_lowerCAmelCase : Dict = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
_lowerCAmelCase : Optional[int] = text_seq_length
_lowerCAmelCase : Any = (image_size // patch_size) ** 2 + 1
_lowerCAmelCase : Any = self.text_seq_length + self.image_seq_length
def __A ( self ):
_lowerCAmelCase : Tuple = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
_lowerCAmelCase : Tuple = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
_lowerCAmelCase : Optional[Any] = bbox[i, j, 3]
_lowerCAmelCase : List[str] = bbox[i, j, 1]
_lowerCAmelCase : List[Any] = t
if bbox[i, j, 2] < bbox[i, j, 0]:
_lowerCAmelCase : int = bbox[i, j, 2]
_lowerCAmelCase : Optional[int] = bbox[i, j, 0]
_lowerCAmelCase : Optional[int] = t
_lowerCAmelCase : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowerCAmelCase : List[str] = None
if self.use_input_mask:
_lowerCAmelCase : int = random_attention_mask([self.batch_size, self.text_seq_length] )
_lowerCAmelCase : str = None
if self.use_token_type_ids:
_lowerCAmelCase : Tuple = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
_lowerCAmelCase : int = None
_lowerCAmelCase : int = None
if self.use_labels:
_lowerCAmelCase : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowerCAmelCase : str = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
_lowerCAmelCase : str = LayoutLMvaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def __A ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ , a__ ):
_lowerCAmelCase : int = LayoutLMvaModel(config=a__ )
model.to(a__ )
model.eval()
# text + image
_lowerCAmelCase : Optional[Any] = model(a__ , pixel_values=a__ )
_lowerCAmelCase : Any = model(
a__ , bbox=a__ , pixel_values=a__ , attention_mask=a__ , token_type_ids=a__ )
_lowerCAmelCase : List[Any] = model(a__ , bbox=a__ , pixel_values=a__ , token_type_ids=a__ )
_lowerCAmelCase : Tuple = model(a__ , bbox=a__ , pixel_values=a__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# text only
_lowerCAmelCase : Dict = model(a__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
_lowerCAmelCase : Optional[Any] = model(pixel_values=a__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
def __A ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ , a__ ):
_lowerCAmelCase : int = self.num_labels
_lowerCAmelCase : int = LayoutLMvaForSequenceClassification(a__ )
model.to(a__ )
model.eval()
_lowerCAmelCase : Tuple = model(
a__ , bbox=a__ , pixel_values=a__ , attention_mask=a__ , token_type_ids=a__ , labels=a__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __A ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ , a__ ):
_lowerCAmelCase : Optional[Any] = self.num_labels
_lowerCAmelCase : str = LayoutLMvaForTokenClassification(config=a__ )
model.to(a__ )
model.eval()
_lowerCAmelCase : List[str] = model(
a__ , bbox=a__ , pixel_values=a__ , attention_mask=a__ , token_type_ids=a__ , labels=a__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )
def __A ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ , a__ ):
_lowerCAmelCase : Dict = LayoutLMvaForQuestionAnswering(config=a__ )
model.to(a__ )
model.eval()
_lowerCAmelCase : str = model(
a__ , bbox=a__ , pixel_values=a__ , attention_mask=a__ , token_type_ids=a__ , start_positions=a__ , end_positions=a__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __A ( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            """input_ids""": input_ids,
            """bbox""": bbox,
            """pixel_values""": pixel_values,
            """token_type_ids""": token_type_ids,
            """attention_mask""": input_mask,
        }
return config, inputs_dict
@require_torch
class __A ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
_UpperCamelCase : Tuple = False
_UpperCamelCase : Any = False
_UpperCamelCase : Any = False
_UpperCamelCase : int = (
(
LayoutLMvaModel,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaForQuestionAnswering,
)
if is_torch_available()
else ()
)
_UpperCamelCase : Union[str, Any] = (
{"document-question-answering": LayoutLMvaForQuestionAnswering, "feature-extraction": LayoutLMvaModel}
if is_torch_available()
else {}
)
def __A ( self , a__ , a__ , a__ , a__ , a__ ):
# `DocumentQuestionAnsweringPipeline` is expected to work with this model, but it combines the text and visual
# embedding along the sequence dimension (dim 1), which causes an error during post-processing as `p_mask` has
# the sequence dimension of the text embedding only.
# (see the line `embedding_output = torch.cat([embedding_output, visual_embeddings], dim=1)`)
return True
def __A ( self ):
_lowerCAmelCase : Any = LayoutLMvaModelTester(self )
_lowerCAmelCase : int = ConfigTester(self , config_class=a__ , hidden_size=37 )
def __A ( self , a__ , a__ , a__=False ):
_lowerCAmelCase : List[str] = copy.deepcopy(a__ )
if model_class in get_values(a__ ):
_lowerCAmelCase : Optional[int] = {
k: v.unsqueeze(1 ).expand(-1 , self.model_tester.num_choices , -1 ).contiguous()
if isinstance(a__ , torch.Tensor ) and v.ndim > 1
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(a__ ):
_lowerCAmelCase : List[Any] = torch.ones(self.model_tester.batch_size , dtype=torch.long , device=a__ )
elif model_class in get_values(a__ ):
_lowerCAmelCase : Optional[int] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=a__ )
_lowerCAmelCase : Any = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=a__ )
elif model_class in [
*get_values(a__ ),
]:
_lowerCAmelCase : Optional[int] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=a__ )
elif model_class in [
*get_values(a__ ),
]:
_lowerCAmelCase : int = torch.zeros(
(self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=torch.long , device=a__ , )
return inputs_dict
def __A ( self ):
self.config_tester.run_common_tests()
def __A ( self ):
_lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a__ )
def __A ( self ):
_lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
_lowerCAmelCase : int = type
self.model_tester.create_and_check_model(*a__ )
def __A ( self ):
_lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*a__ )
def __A ( self ):
_lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*a__ )
def __A ( self ):
_lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*a__ )
@slow
def __A ( self ):
for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase : str = LayoutLMvaModel.from_pretrained(a__ )
self.assertIsNotNone(a__ )
def SCREAMING_SNAKE_CASE ( ) -> str:
_lowerCAmelCase : Any = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
class __A ( unittest.TestCase ):
@cached_property
def __A ( self ):
return LayoutLMvaImageProcessor(apply_ocr=a__ ) if is_vision_available() else None
@slow
def __A ( self ):
_lowerCAmelCase : Optional[int] = LayoutLMvaModel.from_pretrained("""microsoft/layoutlmv3-base""" ).to(a__ )
_lowerCAmelCase : Dict = self.default_image_processor
_lowerCAmelCase : Optional[int] = prepare_img()
_lowerCAmelCase : Dict = image_processor(images=a__ , return_tensors="""pt""" ).pixel_values.to(a__ )
_lowerCAmelCase : Optional[Any] = torch.tensor([[1, 2]] )
_lowerCAmelCase : Dict = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]] ).unsqueeze(0 )
# forward pass
_lowerCAmelCase : str = model(
input_ids=input_ids.to(a__ ) , bbox=bbox.to(a__ ) , pixel_values=pixel_values.to(a__ ) , )
# verify the logits
_lowerCAmelCase : Optional[int] = torch.Size((1, 199, 768) )
self.assertEqual(outputs.last_hidden_state.shape , a__ )
_lowerCAmelCase : Union[str, Any] = torch.tensor(
[[-0.0_5_2_9, 0.3_6_1_8, 0.1_6_3_2], [-0.1_5_8_7, -0.1_6_6_7, -0.0_4_0_0], [-0.1_5_5_7, -0.1_6_7_1, -0.0_5_0_5]] ).to(a__ )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , a__ , atol=1e-4 ) )
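# Note on the check above (illustrative): the sequence length 199 is 2 text tokens +
# (224 // 16) ** 2 = 196 image patches + 1 CLS token, and comparing a fixed 3x3 slice
# of the final hidden state with atol=1e-4 is the usual transformers integration-test
# pattern: it catches numerical drift without pinning the full (1, 199, 768) tensor.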
| 126
| 1
|
"""simple docstring"""
import argparse
import collections
import json
from pathlib import Path
import requests
import torch
import yaml
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTImageProcessor,
MobileViTVaConfig,
MobileViTVaForImageClassification,
MobileViTVaForSemanticSegmentation,
)
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase = logging.get_logger(__name__)
def load_orig_config_file( config_path ):
    print('Loading config file...' )
    def flatten_yaml_as_dict(d , parent_key='' , sep='.' ):
        items = []
        for k, v in d.items():
            new_key = parent_key + sep + k if parent_key else k
            if isinstance(v , collections.abc.MutableMapping ):
                items.extend(flatten_yaml_as_dict(v , new_key , sep=sep ).items() )
            else:
                items.append((new_key, v) )
        return dict(items )
    config = argparse.Namespace()
    with open(config_path , 'r' ) as yaml_file:
        try:
            cfg = yaml.load(yaml_file , Loader=yaml.FullLoader )
            flat_cfg = flatten_yaml_as_dict(cfg )
            for k, v in flat_cfg.items():
                setattr(config , k , v )
        except yaml.YAMLError as exc:
            logger.error('Error while loading config file: {}. Error message: {}'.format(config_path , str(exc ) ) )
    return config
def get_mobilevitva_config( task_name , orig_config_path ):
    config = MobileViTVaConfig()
    is_segmentation_model = False
    # dataset
    if task_name.startswith('imagenet1k_' ):
        config.num_labels = 1_0_0_0
        if int(task_name.strip().split('_' )[-1] ) == 3_8_4:
            config.image_size = 3_8_4
        else:
            config.image_size = 2_5_6
        filename = 'imagenet-1k-id2label.json'
    elif task_name.startswith('imagenet21k_to_1k_' ):
        config.num_labels = 2_1_0_0_0
        if int(task_name.strip().split('_' )[-1] ) == 3_8_4:
            config.image_size = 3_8_4
        else:
            config.image_size = 2_5_6
        filename = 'imagenet-22k-id2label.json'
    elif task_name.startswith('ade20k_' ):
        config.num_labels = 1_5_1
        config.image_size = 5_1_2
        filename = 'ade20k-id2label.json'
        is_segmentation_model = True
    elif task_name.startswith('voc_' ):
        config.num_labels = 2_1
        config.image_size = 5_1_2
        filename = 'pascal-voc-id2label.json'
        is_segmentation_model = True
    # orig_config
    orig_config = load_orig_config_file(orig_config_path )
    assert getattr(orig_config , 'model.classification.name' , -1 ) == "mobilevit_v2", "Invalid model"
    config.width_multiplier = getattr(orig_config , 'model.classification.mitv2.width_multiplier' , 1.0 )
    assert (
        getattr(orig_config , 'model.classification.mitv2.attn_norm_layer' , -1 ) == "layer_norm_2d"
    ), "Norm layers other than layer_norm_2d is not supported"
    config.hidden_act = getattr(orig_config , 'model.classification.activation.name' , 'swish' )
    # config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256)
    if is_segmentation_model:
        config.output_stride = getattr(orig_config , 'model.segmentation.output_stride' , 1_6 )
        if "_deeplabv3" in task_name:
            config.atrous_rates = getattr(orig_config , 'model.segmentation.deeplabv3.aspp_rates' , [1_2, 2_4, 3_6] )
            config.aspp_out_channels = getattr(orig_config , 'model.segmentation.deeplabv3.aspp_out_channels' , 5_1_2 )
            config.aspp_dropout_prob = getattr(orig_config , 'model.segmentation.deeplabv3.aspp_dropout' , 0.1 )
    # id2label
    repo_id = 'huggingface/label-files'
    idalabel = json.load(open(hf_hub_download(repo_id , filename , repo_type='dataset' ) , 'r' ) )
    idalabel = {int(k ): v for k, v in idalabel.items()}
    config.idalabel = idalabel
    config.labelaid = {v: k for k, v in idalabel.items()}
    return config
def rename_key( dct , old , new ):
    val = dct.pop(old )
    dct[new] = val
def create_rename_keys( state_dict , base_model=False ):
    if base_model:
        model_prefix = ''
    else:
        model_prefix = 'mobilevitv2.'
    rename_keys = []
    for k in state_dict.keys():
        if k[:8] == "encoder.":
            k_new = k[8:]
        else:
            k_new = k
        if ".block." in k:
            k_new = k_new.replace('.block.' , '.' )
        if ".conv." in k:
            k_new = k_new.replace('.conv.' , '.convolution.' )
        if ".norm." in k:
            k_new = k_new.replace('.norm.' , '.normalization.' )
        if "conv_1." in k:
            k_new = k_new.replace('conv_1.' , f"""{model_prefix}conv_stem.""" )
        for i in [1, 2]:
            if f"""layer_{i}.""" in k:
                k_new = k_new.replace(f"""layer_{i}.""" , f"""{model_prefix}encoder.layer.{i-1}.layer.""" )
        if ".exp_1x1." in k:
            k_new = k_new.replace('.exp_1x1.' , '.expand_1x1.' )
        if ".red_1x1." in k:
            k_new = k_new.replace('.red_1x1.' , '.reduce_1x1.' )
        for i in [3, 4, 5]:
            if f"""layer_{i}.0.""" in k:
                k_new = k_new.replace(f"""layer_{i}.0.""" , f"""{model_prefix}encoder.layer.{i-1}.downsampling_layer.""" )
            if f"""layer_{i}.1.local_rep.0.""" in k:
                k_new = k_new.replace(f"""layer_{i}.1.local_rep.0.""" , f"""{model_prefix}encoder.layer.{i-1}.conv_kxk.""" )
            if f"""layer_{i}.1.local_rep.1.""" in k:
                k_new = k_new.replace(f"""layer_{i}.1.local_rep.1.""" , f"""{model_prefix}encoder.layer.{i-1}.conv_1x1.""" )
        for i in [3, 4, 5]:
            if i == 3:
                j_in = [0, 1]
            elif i == 4:
                j_in = [0, 1, 2, 3]
            elif i == 5:
                j_in = [0, 1, 2]
            for j in j_in:
                if f"""layer_{i}.1.global_rep.{j}.""" in k:
                    k_new = k_new.replace(
                        f"""layer_{i}.1.global_rep.{j}.""" , f"""{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}.""" )
                if f"""layer_{i}.1.global_rep.{j+1}.""" in k:
                    k_new = k_new.replace(
                        f"""layer_{i}.1.global_rep.{j+1}.""" , f"""{model_prefix}encoder.layer.{i-1}.layernorm.""" )
            if f"""layer_{i}.1.conv_proj.""" in k:
                k_new = k_new.replace(f"""layer_{i}.1.conv_proj.""" , f"""{model_prefix}encoder.layer.{i-1}.conv_projection.""" )
        if "pre_norm_attn.0." in k:
            k_new = k_new.replace('pre_norm_attn.0.' , 'layernorm_before.' )
        if "pre_norm_attn.1." in k:
            k_new = k_new.replace('pre_norm_attn.1.' , 'attention.' )
        if "pre_norm_ffn.0." in k:
            k_new = k_new.replace('pre_norm_ffn.0.' , 'layernorm_after.' )
        if "pre_norm_ffn.1." in k:
            k_new = k_new.replace('pre_norm_ffn.1.' , 'ffn.conv1.' )
        if "pre_norm_ffn.3." in k:
            k_new = k_new.replace('pre_norm_ffn.3.' , 'ffn.conv2.' )
        if "classifier.1." in k:
            k_new = k_new.replace('classifier.1.' , 'classifier.' )
        if "seg_head." in k:
            k_new = k_new.replace('seg_head.' , 'segmentation_head.' )
        if ".aspp_layer." in k:
            k_new = k_new.replace('.aspp_layer.' , '.' )
        if ".aspp_pool." in k:
            k_new = k_new.replace('.aspp_pool.' , '.' )
        rename_keys.append((k, k_new) )
    return rename_keys
def remove_unused_keys( state_dict ):
    keys_to_ignore = []
    for k in state_dict.keys():
        if k.startswith('seg_head.aux_head.' ):
            keys_to_ignore.append(k )
    for k in keys_to_ignore:
        state_dict.pop(k , None )
def prepare_img( ):
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    # url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg"
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_mobilevitva_checkpoint( task_name , checkpoint_path , orig_config_path , pytorch_dump_folder_path ):
    config = get_mobilevitva_config(task_name , orig_config_path )
    # load original state_dict
    checkpoint = torch.load(checkpoint_path , map_location='cpu' )
    # load huggingface model
    if task_name.startswith('ade20k_' ) or task_name.startswith('voc_' ):
        model = MobileViTVaForSemanticSegmentation(config ).eval()
        base_model = False
    else:
        model = MobileViTVaForImageClassification(config ).eval()
        base_model = False
    # remove and rename some keys of load the original model
    state_dict = checkpoint
    remove_unused_keys(state_dict )
    rename_keys = create_rename_keys(state_dict , base_model=base_model )
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict , rename_key_src , rename_key_dest )
    # load modified state_dict
    model.load_state_dict(state_dict )
    # Check outputs on an image, prepared by MobileViTImageProcessor
    image_processor = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 3_2 )
    inputs = image_processor(images=prepare_img() , return_tensors='pt' )
    outputs = model(**inputs )
    # verify classification model
    if task_name.startswith('imagenet' ):
        logits = outputs.logits
        predicted_class_idx = logits.argmax(-1 ).item()
        print('Predicted class:' , model.config.idalabel[predicted_class_idx] )
        if task_name.startswith('imagenet1k_256' ) and config.width_multiplier == 1.0:
            # expected_logits for base variant
            expected_logits = torch.tensor([-1.6336E00, -7.3204E-02, -5.1883E-01] )
            assert torch.allclose(logits[0, :3] , expected_logits , atol=1E-4 )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(f"""Saving model {task_name} to {pytorch_dump_folder_path}""" )
    model.save_pretrained(pytorch_dump_folder_path )
    print(f"""Saving image processor to {pytorch_dump_folder_path}""" )
    image_processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--task""",
default="""imagenet1k_256""",
type=str,
help=(
"""Name of the task for which the MobileViTV2 model you'd like to convert is trained on . """
"""
Classification (ImageNet-1k)
- MobileViTV2 (256x256) : imagenet1k_256
- MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384
- MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :
imagenet21k_to_1k_256
- MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on
ImageNet-1k 384x384) : imagenet21k_to_1k_384
Segmentation
- ADE20K Dataset : ade20k_deeplabv3
- Pascal VOC 2012 Dataset: voc_deeplabv3
"""
),
choices=[
"""imagenet1k_256""",
"""imagenet1k_384""",
"""imagenet21k_to_1k_256""",
"""imagenet21k_to_1k_384""",
"""ade20k_deeplabv3""",
"""voc_deeplabv3""",
],
)
parser.add_argument(
"""--orig_checkpoint_path""", required=True, type=str, help="""Path to the original state dict (.pt file)."""
)
parser.add_argument("""--orig_config_path""", required=True, type=str, help="""Path to the original config file.""")
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, type=str, help="""Path to the output PyTorch model directory."""
)
lowerCAmelCase = parser.parse_args()
convert_mobilevitva_checkpoint(
args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path
)
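# Example invocation (paths are placeholders and the script file name is an assumption):
#   python convert_mobilevitv2_original_to_pytorch.py --task imagenet1k_256 \
#       --orig_checkpoint_path mobilevitv2-1.0.pt --orig_config_path mobilevitv2.yaml \
#       --pytorch_dump_folder_path ./mobilevitv2-1.0-imagenet1k-256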
| 126
|
"""simple docstring"""
import unittest
import numpy as np
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.bert.modeling_flax_bert import (
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
)
class A_ ( unittest.TestCase ):
"""simple docstring"""
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_attention_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_choices=4 , ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs( self ):
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        config = BertConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , )
        return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict
    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            attention_mask,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
class FlaxBertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
        (
            FlaxBertModel,
            FlaxBertForPreTraining,
            FlaxBertForMaskedLM,
            FlaxBertForMultipleChoice,
            FlaxBertForQuestionAnswering,
            FlaxBertForNextSentencePrediction,
            FlaxBertForSequenceClassification,
            FlaxBertForTokenClassification,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxBertModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        model = FlaxBertModel.from_pretrained("bert-base-cased")
        outputs = model(np.ones((1, 1)))
        self.assertIsNotNone(outputs)
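# A minimal illustration of driving the tester above outside unittest (an
# assumed usage sketch, not part of the original test file; it needs the same
# flax dependencies and uses the tiny default config, so it runs on CPU):
if __name__ == "__main__" and is_flax_available():
    _tester = FlaxBertModelTester(None)
    _config, _inputs = _tester.prepare_config_and_inputs_for_common()
    _model = FlaxBertModel(_config)
    _outputs = _model(**_inputs)  # last_hidden_state has shape (batch 13, seq 7, hidden 32)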
| 126
| 1
|
"""simple docstring"""
def exchange_sort(numbers: list[int]) -> list[int]:
    """Sort a list in place by repeatedly exchanging out-of-order pairs."""
    numbers_length = len(numbers)
    for i in range(numbers_length):
        for j in range(i + 1, numbers_length):
            if numbers[j] < numbers[i]:
                numbers[i], numbers[j] = numbers[j], numbers[i]
    return numbers
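# A quick illustrative check (example values, not part of the original
# script): each outer pass leaves the smallest remaining element at index i,
# so the whole list ends up in ascending order in O(n^2) comparisons.
assert exchange_sort([5, 1, 4, 2, 3]) == [1, 2, 3, 4, 5]
assert exchange_sort([-3, 0, -1]) == [-3, -1, 0]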
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(exchange_sort(unsorted))
| 248
|
"""simple docstring"""
from __future__ import annotations
def make_matrix(row_size: int = 4) -> list[list[int]]:
    row_size = abs(row_size) or 4
    return [[1 + x + y * row_size for x in range(row_size)] for y in range(row_size)]


def rotate_90(matrix: list[list[int]]) -> list[list[int]]:
    return reverse_row(transpose(matrix))
    # OR.. transpose(reverse_column(matrix))


def rotate_180(matrix: list[list[int]]) -> list[list[int]]:
    return reverse_row(reverse_column(matrix))
    # OR.. reverse_column(reverse_row(matrix))


def rotate_270(matrix: list[list[int]]) -> list[list[int]]:
    return reverse_column(transpose(matrix))
    # OR.. transpose(reverse_row(matrix))


def transpose(matrix: list[list[int]]) -> list[list[int]]:
    matrix = [list(x) for x in zip(*matrix)]
    return matrix


def reverse_row(matrix: list[list[int]]) -> list[list[int]]:
    matrix = matrix[::-1]
    return matrix


def reverse_column(matrix: list[list[int]]) -> list[list[int]]:
    matrix = [x[::-1] for x in matrix]
    return matrix


def print_matrix(matrix: list[list[int]]) -> None:
    for i in matrix:
        print(*i)
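# Illustrative 2x2 trace (values assumed, not from the original file):
# transpose flips across the main diagonal and reverse_row flips the row
# order, so their composition is a 90-degree counterclockwise rotation.
assert transpose([[1, 2], [3, 4]]) == [[1, 3], [2, 4]]
assert rotate_90([[1, 2], [3, 4]]) == [[2, 4], [1, 3]]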
if __name__ == "__main__":
    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 90 counterclockwise:\n")
    print_matrix(rotate_90(matrix))

    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 180:\n")
    print_matrix(rotate_180(matrix))

    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 270 counterclockwise:\n")
    print_matrix(rotate_270(matrix))
| 248
| 1
|
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class __lowercase ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
__lowerCAmelCase = StableDiffusionInpaintPipeline
__lowerCAmelCase = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
__lowerCAmelCase = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
__lowerCAmelCase = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
__lowerCAmelCase = frozenset([] )
def _lowerCamelCase ( self ):
torch.manual_seed(0 )
__a : List[Any] = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=9 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=_UpperCAmelCase , )
__a : Dict = PNDMScheduler(skip_prk_steps=_UpperCAmelCase )
torch.manual_seed(0 )
__a : Dict = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
__a : List[str] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='''gelu''' , projection_dim=512 , )
__a : Tuple = CLIPTextModel(_UpperCAmelCase )
__a : Any = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
__a : Tuple = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase=0 ):
# TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched
__a : int = floats_tensor((1, 3, 32, 32) , rng=random.Random(_UpperCAmelCase ) ).to(_UpperCAmelCase )
__a : Any = image.cpu().permute(0 , 2 , 3 , 1 )[0]
__a : int = Image.fromarray(np.uinta(_UpperCAmelCase ) ).convert('''RGB''' ).resize((64, 64) )
__a : Union[str, Any] = Image.fromarray(np.uinta(image + 4 ) ).convert('''RGB''' ).resize((64, 64) )
if str(_UpperCAmelCase ).startswith('''mps''' ):
__a : int = torch.manual_seed(_UpperCAmelCase )
else:
__a : Any = torch.Generator(device=_UpperCAmelCase ).manual_seed(_UpperCAmelCase )
__a : Any = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': init_image,
'''mask_image''': mask_image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
}
return inputs
def _lowerCamelCase ( self ):
__a : Optional[int] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__a : Tuple = self.get_dummy_components()
__a : Tuple = StableDiffusionInpaintPipeline(**_UpperCAmelCase )
__a : Optional[int] = sd_pipe.to(_UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=_UpperCAmelCase )
__a : List[str] = self.get_dummy_inputs(_UpperCAmelCase )
__a : Tuple = sd_pipe(**_UpperCAmelCase ).images
__a : Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
__a : Optional[Any] = np.array([0.4_7_2_7, 0.5_7_3_5, 0.3_9_4_1, 0.5_4_4_6, 0.5_9_2_6, 0.4_3_9_4, 0.5_0_6_2, 0.4_6_5_4, 0.4_4_7_6] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _lowerCamelCase ( self ):
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class __lowercase ( unittest.TestCase ):
'''simple docstring'''
def _lowerCamelCase ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowerCamelCase ( self ):
__a : int = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-inpaint/init_image.png''' )
__a : Optional[Any] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''' )
__a : Tuple = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'''
'''/yellow_cat_sitting_on_a_park_bench.npy''' )
__a : int = '''stabilityai/stable-diffusion-2-inpainting'''
__a : Dict = StableDiffusionInpaintPipeline.from_pretrained(_UpperCAmelCase , safety_checker=_UpperCAmelCase )
pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
pipe.enable_attention_slicing()
__a : Optional[int] = '''Face of a yellow cat, high resolution, sitting on a park bench'''
__a : Tuple = torch.manual_seed(0 )
__a : Optional[Any] = pipe(
prompt=_UpperCAmelCase , image=_UpperCAmelCase , mask_image=_UpperCAmelCase , generator=_UpperCAmelCase , output_type='''np''' , )
__a : List[str] = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 9e-3
def _lowerCamelCase ( self ):
__a : Optional[int] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-inpaint/init_image.png''' )
__a : Union[str, Any] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''' )
__a : List[Any] = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'''
'''/yellow_cat_sitting_on_a_park_bench_fp16.npy''' )
__a : int = '''stabilityai/stable-diffusion-2-inpainting'''
__a : Union[str, Any] = StableDiffusionInpaintPipeline.from_pretrained(
_UpperCAmelCase , torch_dtype=torch.floataa , safety_checker=_UpperCAmelCase , )
pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
pipe.enable_attention_slicing()
__a : Any = '''Face of a yellow cat, high resolution, sitting on a park bench'''
__a : Any = torch.manual_seed(0 )
__a : Union[str, Any] = pipe(
prompt=_UpperCAmelCase , image=_UpperCAmelCase , mask_image=_UpperCAmelCase , generator=_UpperCAmelCase , output_type='''np''' , )
__a : Dict = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 5e-1
def _lowerCamelCase ( self ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
__a : Dict = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-inpaint/init_image.png''' )
__a : str = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''' )
__a : Optional[int] = '''stabilityai/stable-diffusion-2-inpainting'''
__a : Tuple = PNDMScheduler.from_pretrained(_UpperCAmelCase , subfolder='''scheduler''' )
__a : int = StableDiffusionInpaintPipeline.from_pretrained(
_UpperCAmelCase , safety_checker=_UpperCAmelCase , scheduler=_UpperCAmelCase , torch_dtype=torch.floataa , )
pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
__a : List[Any] = '''Face of a yellow cat, high resolution, sitting on a park bench'''
__a : str = torch.manual_seed(0 )
__a : Tuple = pipe(
prompt=_UpperCAmelCase , image=_UpperCAmelCase , mask_image=_UpperCAmelCase , generator=_UpperCAmelCase , num_inference_steps=2 , output_type='''np''' , )
__a : List[Any] = torch.cuda.max_memory_allocated()
# make sure that less than 2.65 GB is allocated
assert mem_bytes < 2.6_5 * 10**9
| 160
|
"""simple docstring"""
def solution(n: int = 600851475143) -> int:
    """Return the largest prime factor of n by trial division."""
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError("Parameter n must be int or castable to int.")
    if n <= 0:
        raise ValueError("Parameter n must be greater than or equal to one.")
    ans = 1
    i = 2
    while i * i <= n:
        while n % i == 0:
            ans = i
            n //= i
        i += 1
    if n > 1:
        ans = n
    return int(ans)
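# Worked example (values illustrative): 13195 = 5 * 7 * 13 * 29, so its
# largest prime factor is 29; a prime input is its own largest factor.
assert solution(13195) == 29
assert solution(17) == 17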
if __name__ == "__main__":
print(F'{solution() = }')
| 160
| 1
|
'''simple docstring'''
import unittest
from transformers import SPIECE_UNDERLINE, ReformerTokenizer, ReformerTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
__A : Optional[Any] = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
@require_tokenizers
class __UpperCamelCase ( lowercase__ , unittest.TestCase ):
lowercase : Union[str, Any] = ReformerTokenizer
lowercase : Any = ReformerTokenizerFast
lowercase : List[Any] = True
lowercase : Dict = False
lowercase : Dict = True
def a__ ( self :List[Any] ):
super().setUp()
snake_case_ : Any = ReformerTokenizer(_UpperCamelCase ,keep_accents=_UpperCamelCase )
tokenizer.save_pretrained(self.tmpdirname )
def a__ ( self :List[Any] ):
snake_case_ : str = """<s>"""
snake_case_ : Optional[Any] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_UpperCamelCase ) ,_UpperCamelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_UpperCamelCase ) ,_UpperCamelCase )
def a__ ( self :Any ):
snake_case_ : str = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] ,"""<unk>""" )
self.assertEqual(vocab_keys[1] ,"""<s>""" )
self.assertEqual(vocab_keys[-1] ,"""j""" )
self.assertEqual(len(_UpperCamelCase ) ,1_0_0_0 )
def a__ ( self :Tuple ):
self.assertEqual(self.get_tokenizer().vocab_size ,1_0_0_0 )
def a__ ( self :Union[str, Any] ):
if not self.test_rust_tokenizer:
return
snake_case_ : Union[str, Any] = self.get_tokenizer()
snake_case_ : List[str] = self.get_rust_tokenizer()
snake_case_ : str = """I was born in 92000, and this is falsé."""
snake_case_ : List[Any] = tokenizer.tokenize(_UpperCamelCase )
snake_case_ : List[str] = rust_tokenizer.tokenize(_UpperCamelCase )
self.assertListEqual(_UpperCamelCase ,_UpperCamelCase )
snake_case_ : Union[str, Any] = tokenizer.encode(_UpperCamelCase ,add_special_tokens=_UpperCamelCase )
snake_case_ : int = rust_tokenizer.encode(_UpperCamelCase ,add_special_tokens=_UpperCamelCase )
self.assertListEqual(_UpperCamelCase ,_UpperCamelCase )
snake_case_ : int = self.get_rust_tokenizer()
snake_case_ : List[str] = tokenizer.encode(_UpperCamelCase )
snake_case_ : List[str] = rust_tokenizer.encode(_UpperCamelCase )
self.assertListEqual(_UpperCamelCase ,_UpperCamelCase )
def a__ ( self :Optional[Any] ,_UpperCamelCase :str=1_5 ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
snake_case_ : List[Any] = self.rust_tokenizer_class.from_pretrained(_UpperCamelCase ,**_UpperCamelCase )
# Simple input
snake_case_ : List[str] = """This is a simple input"""
snake_case_ : List[Any] = ["""This is a simple input 1""", """This is a simple input 2"""]
snake_case_ : Dict = ("""This is a simple input""", """This is a pair""")
snake_case_ : int = [
("""This is a simple input 1""", """This is a simple input 2"""),
("""This is a simple pair 1""", """This is a simple pair 2"""),
]
# Simple input tests
self.assertRaises(_UpperCamelCase ,tokenizer_r.encode ,_UpperCamelCase ,max_length=_UpperCamelCase ,padding="""max_length""" )
# Simple input
self.assertRaises(_UpperCamelCase ,tokenizer_r.encode_plus ,_UpperCamelCase ,max_length=_UpperCamelCase ,padding="""max_length""" )
# Simple input
self.assertRaises(
_UpperCamelCase ,tokenizer_r.batch_encode_plus ,_UpperCamelCase ,max_length=_UpperCamelCase ,padding="""max_length""" ,)
# Pair input
self.assertRaises(_UpperCamelCase ,tokenizer_r.encode ,_UpperCamelCase ,max_length=_UpperCamelCase ,padding="""max_length""" )
# Pair input
self.assertRaises(_UpperCamelCase ,tokenizer_r.encode_plus ,_UpperCamelCase ,max_length=_UpperCamelCase ,padding="""max_length""" )
# Pair input
self.assertRaises(
_UpperCamelCase ,tokenizer_r.batch_encode_plus ,_UpperCamelCase ,max_length=_UpperCamelCase ,padding="""max_length""" ,)
def a__ ( self :str ):
pass
def a__ ( self :Optional[Any] ):
snake_case_ : Any = ReformerTokenizer(_UpperCamelCase ,keep_accents=_UpperCamelCase )
snake_case_ : Tuple = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(_UpperCamelCase ,["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_UpperCamelCase ) ,[2_8_5, 4_6, 1_0, 1_7_0, 3_8_2] ,)
snake_case_ : Tuple = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
_UpperCamelCase ,[
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] ,)
snake_case_ : List[Any] = tokenizer.convert_tokens_to_ids(_UpperCamelCase )
self.assertListEqual(
_UpperCamelCase ,[8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 0, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 0, 4] ,)
snake_case_ : str = tokenizer.convert_ids_to_tokens(_UpperCamelCase )
self.assertListEqual(
_UpperCamelCase ,[
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] ,)
@cached_property
def a__ ( self :Union[str, Any] ):
return ReformerTokenizer.from_pretrained("""google/reformer-crime-and-punishment""" )
@slow
def a__ ( self :Dict ):
snake_case_ : int = """Hello World!"""
snake_case_ : Optional[int] = [1_2_6, 3_2, 2_6_2, 1_5_2, 3_8, 7_2, 2_8_7]
self.assertListEqual(_UpperCamelCase ,self.big_tokenizer.encode(_UpperCamelCase ) )
@slow
def a__ ( self :Optional[int] ):
snake_case_ : Union[str, Any] = (
"""This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"""
""" add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"""
)
snake_case_ : Dict = [
1_0_8,
2_6_5,
2_4,
1_1_1,
4,
2_5_8,
1_5_6,
3_5,
2_8,
2_7_5,
3,
2_5_9,
2_9_7,
2_6_0,
8_4,
4,
3_5,
1_1_0,
4_4,
8,
2_5_9,
9_1,
2_6_8,
2_1,
1_1,
2_0_9,
2_7_4,
1_0_9,
2_6_6,
2_7_7,
1_1_7,
8_6,
9_3,
3_1_5,
2_5_8,
2_7_8,
2_5_8,
2_7_7,
2_5_8,
0,
2_5_8,
2_8_8,
2_5_8,
3_1_9,
2_5_8,
0,
2_5_8,
0,
2_5_8,
0,
2_5_8,
0,
2_5_8,
2_8_7,
2_5_8,
3_1_5,
2_5_8,
2_8_9,
2_5_8,
2_7_8,
9_9,
2_6_9,
2_6_6,
2_6_2,
8,
2_5_9,
2_4_1,
4,
2_1_7,
2_3_0,
2_6_8,
2_6_6,
5_5,
1_6_8,
1_0_6,
7_5,
1_9_3,
2_6_6,
2_2_3,
2_7,
4_9,
2_6,
2_8_2,
2_5,
2_6_4,
2_9_9,
1_9,
2_6,
0,
2_5_8,
2_7_7,
1_1_7,
8_6,
9_3,
1_7_6,
1_8_3,
2_7_0,
1_1,
2_6_2,
4_2,
6_1,
2_6_5,
]
self.assertListEqual(_UpperCamelCase ,self.big_tokenizer.encode(_UpperCamelCase ) )
@require_torch
@slow
def a__ ( self :Dict ):
import torch
from transformers import ReformerConfig, ReformerModel
# Build sequence
snake_case_ : Union[str, Any] = list(self.big_tokenizer.get_vocab().keys() )[:1_0]
snake_case_ : Tuple = """ """.join(_UpperCamelCase )
snake_case_ : Tuple = self.big_tokenizer.encode_plus(_UpperCamelCase ,return_tensors="""pt""" )
snake_case_ : Tuple = self.big_tokenizer.batch_encode_plus([sequence, sequence] ,return_tensors="""pt""" )
snake_case_ : Tuple = ReformerConfig()
# The input gets padded during training so adjust the axial position encodings from the pretrained model value of (512, 1024)
snake_case_ : int = encoded_sequence["""input_ids"""].shape
snake_case_ : Optional[Any] = ReformerModel(_UpperCamelCase )
# Reformer has config.vocab_size == tokenizer.vocab_size == len(tokenizer) - 1 = 320; len(tokenizer) is 321 (including a pad token with id 320)
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**_UpperCamelCase )
model(**_UpperCamelCase )
@slow
def a__ ( self :List[str] ):
# fmt: off
snake_case_ : Optional[Any] = {"""input_ids""": [[1_0_8, 2_6_5, 2_4, 1_1_1, 4, 2_5_8, 1_5_6, 7, 5_1, 2_7_9, 5_8, 7, 7_6, 2_5, 6_9, 2_7_8], [1_4_0, 2_4_3, 2_6_4, 1_3_4, 1_7, 2_6_7, 7_7, 2_6_3, 2_2, 2_6_2, 2_9_7, 2_5_8, 3_0_4, 1_7_7, 2_7_9, 2_6_6, 1_4, 8_9, 1_3, 3_5, 2_6_1, 2_9_9, 2_7_2, 1_3_7, 2_7_5, 2_7_8]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# This tokenizer does not know some characters like ")".
# That is the reason why we use very simple texts here.
# Also see https://github.com/huggingface/transformers/pull/11737#issuecomment-850769064
snake_case_ : Any = [
"""This is a very simple sentence.""",
"""The quick brown fox jumps over the lazy dog.""",
]
self.tokenizer_integration_test_util(
expected_encoding=_UpperCamelCase ,model_name="""google/reformer-crime-and-punishment""" ,revision="""0e6c3decb8211d49bf881013425dc8b0448b3f5a""" ,padding=_UpperCamelCase ,sequences=_UpperCamelCase ,)
| 8
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_whisper': ['WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'WhisperConfig', 'WhisperOnnxConfig'],
'feature_extraction_whisper': ['WhisperFeatureExtractor'],
'processing_whisper': ['WhisperProcessor'],
'tokenization_whisper': ['WhisperTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_whisper_fast"] = ["WhisperTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_whisper"] = [
'WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST',
'WhisperForConditionalGeneration',
'WhisperModel',
'WhisperPreTrainedModel',
'WhisperForAudioClassification',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_whisper"] = [
'TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFWhisperForConditionalGeneration',
'TFWhisperModel',
'TFWhisperPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_whisper"] = [
'FlaxWhisperForConditionalGeneration',
'FlaxWhisperModel',
'FlaxWhisperPreTrainedModel',
'FlaxWhisperForAudioClassification',
]
if TYPE_CHECKING:
from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig
from .feature_extraction_whisper import WhisperFeatureExtractor
from .processing_whisper import WhisperProcessor
from .tokenization_whisper import WhisperTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_whisper_fast import WhisperTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_whisper import (
WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
WhisperForAudioClassification,
WhisperForConditionalGeneration,
WhisperModel,
WhisperPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_whisper import (
TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWhisperForConditionalGeneration,
TFWhisperModel,
TFWhisperPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_whisper import (
FlaxWhisperForAudioClassification,
FlaxWhisperForConditionalGeneration,
FlaxWhisperModel,
FlaxWhisperPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 8
| 1
|
"""simple docstring"""
import os
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_doctest_list.py
REPO_PATH = "."

if __name__ == "__main__":
    doctest_file_path = os.path.join(REPO_PATH, "utils/documentation_tests.txt")
    non_existent_paths = []
    all_paths = []
    with open(doctest_file_path) as fp:
        for line in fp:
            line = line.strip()
            path = os.path.join(REPO_PATH, line)
            if not (os.path.isfile(path) or os.path.isdir(path)):
                non_existent_paths.append(line)
            all_paths.append(path)
    if len(non_existent_paths) > 0:
        non_existent_paths = "\n".join(non_existent_paths)
        raise ValueError(f"`utils/documentation_tests.txt` contains non-existent paths:\n{non_existent_paths}")
    if all_paths != sorted(all_paths):
        raise ValueError("Files in `utils/documentation_tests.txt` are not in alphabetical order.")
| 45
|
import inspect
import re
from hashlib import sha256
from typing import Dict, List
from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql # noqa F401
from .text import text
def _hash_python_lines(lines: List[str]) -> str:
    filtered_lines = []
    for line in lines:
        line = re.sub(r"#.*", "", line)  # remove comments
        if line:
            filtered_lines.append(line)
    full_str = "\n".join(filtered_lines)

    # Make a hash from all this code
    full_bytes = full_str.encode("utf-8")
    return sha256(full_bytes).hexdigest()
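# Illustrative behaviour (example lines assumed): comment-only and blank
# lines are dropped before hashing, so cosmetic edits leave the module
# hash -- and therefore the dataset cache key -- unchanged.
assert _hash_python_lines(["x = 1", "# a comment", "", "y = 2"]) == _hash_python_lines(["x = 1", "y = 2"])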
# get importable module names and hash for caching
_PACKAGED_DATASETS_MODULES = {
'csv': (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
'json': (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
'pandas': (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
'parquet': (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
'arrow': (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
'text': (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
'imagefolder': (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
'audiofolder': (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}
# Used to infer the module to use based on the data files extensions
_EXTENSION_TO_MODULE = {
'.csv': ('csv', {}),
'.tsv': ('csv', {'sep': '\t'}),
'.json': ('json', {}),
'.jsonl': ('json', {}),
'.parquet': ('parquet', {}),
'.arrow': ('arrow', {}),
'.txt': ('text', {}),
}
_EXTENSION_TO_MODULE.update({ext: ('imagefolder', {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ('imagefolder', {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ('audiofolder', {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ('audiofolder', {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_MODULE_SUPPORTS_METADATA = {"imagefolder", "audiofolder"}
# Used to filter data files based on extensions given a module name
_MODULE_TO_EXTENSIONS: Dict[str, List[str]] = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
_MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)
_MODULE_TO_EXTENSIONS["imagefolder"].append('.zip')
_MODULE_TO_EXTENSIONS["audiofolder"].append('.zip')
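# For example (illustrative lookups): each extension maps straight to a
# builder module plus default builder kwargs, which is how loaders infer the
# data format from file names.
assert _EXTENSION_TO_MODULE[".tsv"] == ("csv", {"sep": "\t"})
assert ".zip" in _MODULE_TO_EXTENSIONS["imagefolder"]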
| 32
| 0
|
'''simple docstring'''
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class UpperCAmelCase_ ( unittest.TestCase ):
def __UpperCAmelCase ( self : List[Any] ) -> Union[str, Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def __UpperCAmelCase ( self : List[Any] ) -> Tuple:
lowerCAmelCase , lowerCAmelCase = FlaxControlNetModel.from_pretrained(
'lllyasviel/sd-controlnet-canny' , from_pt=UpperCAmelCase__ , dtype=jnp.bfloataa )
lowerCAmelCase , lowerCAmelCase = FlaxStableDiffusionControlNetPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5' , controlnet=UpperCAmelCase__ , from_pt=UpperCAmelCase__ , dtype=jnp.bfloataa )
lowerCAmelCase = controlnet_params
lowerCAmelCase = 'bird'
lowerCAmelCase = jax.device_count()
lowerCAmelCase = pipe.prepare_text_inputs([prompts] * num_samples )
lowerCAmelCase = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png' )
lowerCAmelCase = pipe.prepare_image_inputs([canny_image] * num_samples )
lowerCAmelCase = jax.random.PRNGKey(0 )
lowerCAmelCase = jax.random.split(UpperCAmelCase__ , jax.device_count() )
lowerCAmelCase = replicate(UpperCAmelCase__ )
lowerCAmelCase = shard(UpperCAmelCase__ )
lowerCAmelCase = shard(UpperCAmelCase__ )
lowerCAmelCase = pipe(
prompt_ids=UpperCAmelCase__ , image=UpperCAmelCase__ , params=UpperCAmelCase__ , prng_seed=UpperCAmelCase__ , num_inference_steps=5_0 , jit=UpperCAmelCase__ , ).images
assert images.shape == (jax.device_count(), 1, 7_6_8, 5_1_2, 3)
lowerCAmelCase = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
lowerCAmelCase = images[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
lowerCAmelCase = jnp.asarray(jax.device_get(image_slice.flatten() ) )
lowerCAmelCase = jnp.array(
[0.167_969, 0.116_699, 0.081_543, 0.154_297, 0.132_812, 0.108_887, 0.169_922, 0.169_922, 0.205_078] )
print(F'''output_slice: {output_slice}''' )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
def __UpperCAmelCase ( self : List[Any] ) -> List[str]:
lowerCAmelCase , lowerCAmelCase = FlaxControlNetModel.from_pretrained(
'lllyasviel/sd-controlnet-openpose' , from_pt=UpperCAmelCase__ , dtype=jnp.bfloataa )
lowerCAmelCase , lowerCAmelCase = FlaxStableDiffusionControlNetPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5' , controlnet=UpperCAmelCase__ , from_pt=UpperCAmelCase__ , dtype=jnp.bfloataa )
lowerCAmelCase = controlnet_params
lowerCAmelCase = 'Chef in the kitchen'
lowerCAmelCase = jax.device_count()
lowerCAmelCase = pipe.prepare_text_inputs([prompts] * num_samples )
lowerCAmelCase = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png' )
lowerCAmelCase = pipe.prepare_image_inputs([pose_image] * num_samples )
lowerCAmelCase = jax.random.PRNGKey(0 )
lowerCAmelCase = jax.random.split(UpperCAmelCase__ , jax.device_count() )
lowerCAmelCase = replicate(UpperCAmelCase__ )
lowerCAmelCase = shard(UpperCAmelCase__ )
lowerCAmelCase = shard(UpperCAmelCase__ )
lowerCAmelCase = pipe(
prompt_ids=UpperCAmelCase__ , image=UpperCAmelCase__ , params=UpperCAmelCase__ , prng_seed=UpperCAmelCase__ , num_inference_steps=5_0 , jit=UpperCAmelCase__ , ).images
assert images.shape == (jax.device_count(), 1, 7_6_8, 5_1_2, 3)
lowerCAmelCase = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
lowerCAmelCase = images[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
lowerCAmelCase = jnp.asarray(jax.device_get(image_slice.flatten() ) )
lowerCAmelCase = jnp.array(
[[0.271_484, 0.261_719, 0.275_391, 0.277_344, 0.279_297, 0.291_016, 0.294_922, 0.302_734, 0.302_734]] )
print(F'''output_slice: {output_slice}''' )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
| 55
|
'''simple docstring'''
import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional
import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset
from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser
logger = logging.getLogger(__name__)
torch.set_grad_enabled(False)
device = "cuda" if torch.cuda.is_available() else "cpu"
def split_text(text: str, n=100, character=" ") -> List[str]:
    """Split the text every ``n``-th occurrence of ``character``"""
    text = text.split(character)
    return [character.join(text[i : i + n]).strip() for i in range(0, len(text), n)]
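# Illustrative call (example text assumed): with n=3, the passage is cut
# into whitespace-delimited chunks of three words each.
assert split_text("a b c d e", n=3) == ["a b c", "d e"]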
def split_documents(documents: dict) -> dict:
    """Split documents into passages"""
    titles, texts = [], []
    for title, text in zip(documents["title"], documents["text"]):
        if text is not None:
            for passage in split_text(text):
                titles.append(title if title is not None else "")
                texts.append(passage)
    return {"title": titles, "text": texts}
def embed(documents: dict, ctx_encoder: DPRContextEncoder, ctx_tokenizer: DPRContextEncoderTokenizerFast) -> dict:
    """Compute the DPR embeddings of document passages"""
    input_ids = ctx_tokenizer(
        documents["title"], documents["text"], truncation=True, padding="longest", return_tensors="pt"
    )["input_ids"]
    embeddings = ctx_encoder(input_ids.to(device=device), return_dict=True).pooler_output
    return {"embeddings": embeddings.detach().cpu().numpy()}
def main(
    rag_example_args: "RagExampleArguments",
    processing_args: "ProcessingArguments",
    index_hnsw_args: "IndexHnswArguments",
):
    ######################################
    logger.info("Step 1 - Create the dataset")
    ######################################

    # The dataset needed for RAG must have three columns:
    # - title (string): title of the document
    # - text (string): text of a passage of the document
    # - embeddings (array of dimension d): DPR representation of the passage
    # Let's say you have documents in tab-separated csv files with columns "title" and "text"
    assert os.path.isfile(rag_example_args.csv_path), "Please provide a valid path to a csv file"

    # You can load a Dataset object this way
    dataset = load_dataset(
        "csv", data_files=[rag_example_args.csv_path], split="train", delimiter="\t", column_names=["title", "text"]
    )
    # More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files

    # Then split the documents into passages of 100 words
    dataset = dataset.map(split_documents, batched=True, num_proc=processing_args.num_proc)

    # And compute the embeddings
    ctx_encoder = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name).to(device=device)
    ctx_tokenizer = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name)
    new_features = Features(
        {"text": Value("string"), "title": Value("string"), "embeddings": Sequence(Value("float32"))}
    )  # optional, save as float32 instead of float64 to save space
    dataset = dataset.map(
        partial(embed, ctx_encoder=ctx_encoder, ctx_tokenizer=ctx_tokenizer),
        batched=True,
        batch_size=processing_args.batch_size,
        features=new_features,
    )

    # And finally save your dataset
    passages_path = os.path.join(rag_example_args.output_dir, "my_knowledge_dataset")
    dataset.save_to_disk(passages_path)
    # from datasets import load_from_disk
    # dataset = load_from_disk(passages_path)  # to reload the dataset

    ######################################
    logger.info("Step 2 - Index the dataset")
    ######################################

    # Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
    index = faiss.IndexHNSWFlat(index_hnsw_args.d, index_hnsw_args.m, faiss.METRIC_INNER_PRODUCT)
    dataset.add_faiss_index("embeddings", custom_index=index)

    # And save the index
    index_path = os.path.join(rag_example_args.output_dir, "my_knowledge_dataset_hnsw_index.faiss")
    dataset.get_index("embeddings").save(index_path)
    # dataset.load_faiss_index("embeddings", index_path)  # to reload the index
@dataclass
class RagExampleArguments:
    csv_path: str = field(
        default=str(Path(__file__).parent / "test_run" / "dummy-kb" / "my_knowledge_dataset.csv"),
        metadata={"help": "Path to a tab-separated csv file with columns 'title' and 'text'"},
    )
    question: Optional[str] = field(
        default=None,
        metadata={"help": "Question that is passed as input to RAG. Default is 'What does Moses' rod turn into ?'."},
    )
    rag_model_name: str = field(
        default="facebook/rag-sequence-nq",
        metadata={"help": "The RAG model to use. Either 'facebook/rag-sequence-nq' or 'facebook/rag-token-nq'"},
    )
    dpr_ctx_encoder_model_name: str = field(
        default="facebook/dpr-ctx_encoder-multiset-base",
        metadata={
            "help": (
                "The DPR context encoder model to use. Either 'facebook/dpr-ctx_encoder-single-nq-base' or"
                " 'facebook/dpr-ctx_encoder-multiset-base'"
            )
        },
    )
    output_dir: Optional[str] = field(
        default=str(Path(__file__).parent / "test_run" / "dummy-kb"),
        metadata={"help": "Path to a directory where the dataset passages and the index will be saved"},
    )


@dataclass
class ProcessingArguments:
    num_proc: Optional[int] = field(
        default=None,
        metadata={
            "help": "The number of processes to use to split the documents into passages. Default is single process."
        },
    )
    batch_size: int = field(
        default=16,
        metadata={
            "help": "The batch size to use when computing the passages embeddings using the DPR context encoder."
        },
    )


@dataclass
class IndexHnswArguments:
    d: int = field(
        default=768,
        metadata={"help": "The dimension of the embeddings to pass to the HNSW Faiss index."},
    )
    m: int = field(
        default=128,
        metadata={
            "help": (
                "The number of bi-directional links created for every new element during the HNSW index construction."
            )
        },
    )
if __name__ == "__main__":
logging.basicConfig(level=logging.WARNING)
logger.setLevel(logging.INFO)
    parser = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
    rag_example_args, processing_args, index_hnsw_args = parser.parse_args_into_dataclasses()
    with TemporaryDirectory() as tmp_dir:
        rag_example_args.output_dir = rag_example_args.output_dir or tmp_dir
main(rag_example_args, processing_args, index_hnsw_args)
| 55
| 1
|
import logging
import os
import threading
import time
try:
import warnings
except ImportError:
    warnings = None
try:
import msvcrt
except ImportError:
    msvcrt = None
try:
import fcntl
except ImportError:
    fcntl = None
# Backward compatibility
# ------------------------------------------------
try:
TimeoutError
except NameError:
    TimeoutError = OSError
# Data
# ------------------------------------------------
__all__ = [
'''Timeout''',
'''BaseFileLock''',
'''WindowsFileLock''',
'''UnixFileLock''',
'''SoftFileLock''',
'''FileLock''',
]
__version__ = "3.0.12"


_logger = None


def logger():
    global _logger
    _logger = _logger or logging.getLogger(__name__)
    return _logger
class Timeout(TimeoutError):
    """Raised when the lock could not be acquired in *timeout* seconds."""

    def __init__(self, lock_file):
        # The path of the file lock.
        self.lock_file = lock_file
        return None

    def __str__(self):
        temp = f"The file lock '{self.lock_file}' could not be acquired."
        return temp


class _Acquire_ReturnProxy:
    def __init__(self, lock):
        self.lock = lock
        return None

    def __enter__(self):
        return self.lock

    def __exit__(self, exc_type, exc_value, traceback):
        self.lock.release()
        return None
class BaseFileLock:
    """Implements the base class of a file lock."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        max_filename_length = max_filename_length if max_filename_length is not None else 255
        # Hash the filename if it's too long
        lock_file = self.hash_filename_if_too_long(lock_file, max_filename_length)
        # The path to the lock file.
        self._lock_file = lock_file
        # The file descriptor for the *_lock_file* as it is returned by the
        # os.open() function.
        # This file lock is only NOT None, if the object currently holds the
        # lock.
        self._lock_file_fd = None
        # The default timeout value.
        self.timeout = timeout
        # We use this lock primarily for the lock counter.
        self._thread_lock = threading.Lock()
        # The lock counter is used for implementing the nested locking
        # mechanism. Whenever the lock is acquired, the counter is increased and
        # the lock is only released, when this value is 0 again.
        self._lock_counter = 0
        return None

    @property
    def lock_file(self):
        return self._lock_file

    @property
    def timeout(self):
        return self._timeout

    @timeout.setter
    def timeout(self, value):
        self._timeout = float(value)
        return None

    def _acquire(self):
        raise NotImplementedError()

    def _release(self):
        raise NotImplementedError()

    @property
    def is_locked(self):
        return self._lock_file_fd is not None
    def acquire(self, timeout=None, poll_intervall=0.05):
        # Use the default timeout, if no timeout is provided.
        if timeout is None:
            timeout = self.timeout

        # Increment the number right at the beginning.
        # We can still undo it, if something fails.
        with self._thread_lock:
            self._lock_counter += 1

        lock_id = id(self)
        lock_filename = self._lock_file
        start_time = time.time()
        try:
            while True:
                with self._thread_lock:
                    if not self.is_locked:
                        logger().debug(f"Attempting to acquire lock {lock_id} on {lock_filename}")
                        self._acquire()

                if self.is_locked:
                    logger().debug(f"Lock {lock_id} acquired on {lock_filename}")
                    break
                elif timeout >= 0 and time.time() - start_time > timeout:
                    logger().debug(f"Timeout on acquiring lock {lock_id} on {lock_filename}")
                    raise Timeout(self._lock_file)
                else:
                    logger().debug(
                        f"Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ..."
                    )
                    time.sleep(poll_intervall)
        except:  # noqa
            # Something did go wrong, so decrement the counter.
            with self._thread_lock:
                self._lock_counter = max(0, self._lock_counter - 1)
            raise
        return _Acquire_ReturnProxy(lock=self)
    def release(self, force=False):
        with self._thread_lock:
            if self.is_locked:
                self._lock_counter -= 1

                if self._lock_counter == 0 or force:
                    lock_id = id(self)
                    lock_filename = self._lock_file

                    logger().debug(f"Attempting to release lock {lock_id} on {lock_filename}")
                    self._release()
                    self._lock_counter = 0
                    logger().debug(f"Lock {lock_id} released on {lock_filename}")
        return None
    def __enter__(self):
        self.acquire()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.release()
        return None

    def __del__(self):
        self.release(force=True)
        return None

    def hash_filename_if_too_long(self, path: str, max_length: int) -> str:
        filename = os.path.basename(path)
        if len(filename) > max_length and max_length > 0:
            dirname = os.path.dirname(path)
            hashed_filename = str(hash(filename))
            new_filename = filename[: max_length - len(hashed_filename) - 8] + "..." + hashed_filename + ".lock"
            return os.path.join(dirname, new_filename)
        else:
            return path
class WindowsFileLock(BaseFileLock):
    """Uses the :func:`msvcrt.locking` function to hard lock the lock file on Windows systems."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        from .file_utils import relative_to_absolute_path

        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)
        self._lock_file = "\\\\?\\" + relative_to_absolute_path(self.lock_file)

    def _acquire(self):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC

        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            pass
        else:
            try:
                msvcrt.locking(fd, msvcrt.LK_NBLCK, 1)
            except OSError:
                os.close(fd)
            else:
                self._lock_file_fd = fd
        return None

    def _release(self):
        fd = self._lock_file_fd
        self._lock_file_fd = None
        msvcrt.locking(fd, msvcrt.LK_UNLCK, 1)
        os.close(fd)

        try:
            os.remove(self._lock_file)
        # Probably another instance of the application
        # that acquired the file lock.
        except OSError:
            pass
        return None
class UnixFileLock(BaseFileLock):
    """Uses the :func:`fcntl.flock` to hard lock the lock file on unix systems."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        max_filename_length = os.statvfs(os.path.dirname(lock_file)).f_namemax
        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)

    def _acquire(self):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        fd = os.open(self._lock_file, open_mode)

        try:
            fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
        except OSError:
            os.close(fd)
        else:
            self._lock_file_fd = fd
        return None

    def _release(self):
        # Do not remove the lockfile:
        #
        #   https://github.com/benediktschmitt/py-filelock/issues/31
        #   https://stackoverflow.com/questions/17708885/flock-removing-locked-file-without-race-condition
        fd = self._lock_file_fd
        self._lock_file_fd = None
        fcntl.flock(fd, fcntl.LOCK_UN)
        os.close(fd)
        return None
class SoftFileLock(BaseFileLock):
    """Simply watches the existence of the lock file."""

    def _acquire(self):
        open_mode = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC

        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            pass
        else:
            self._lock_file_fd = fd
        return None

    def _release(self):
        os.close(self._lock_file_fd)
        self._lock_file_fd = None

        try:
            os.remove(self._lock_file)
        # The file is already deleted and that's what we want.
        except OSError:
            pass
        return None
FileLock = None

if msvcrt:
    FileLock = WindowsFileLock
elif fcntl:
    FileLock = UnixFileLock
else:
    FileLock = SoftFileLock

    if warnings is not None:
        warnings.warn("only soft file lock is available")
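# A minimal usage sketch (the lock-file name here is illustrative, not from
# the original module): acquire() is re-entrant per lock object thanks to the
# counter above, and the context manager releases the lock on exit even if
# the body raises.
if __name__ == "__main__":
    lock = FileLock("example.lock")
    with lock:
        with lock:  # nested acquire on the same object only bumps the counter
            pass
    assert not lock.is_locked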
| 119
|
import json
import os
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ImageGPTImageProcessor
class lowerCAmelCase_ ( unittest.TestCase ):
def __init__( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_=7, SCREAMING_SNAKE_CASE_=3, SCREAMING_SNAKE_CASE_=18, SCREAMING_SNAKE_CASE_=30, SCREAMING_SNAKE_CASE_=400, SCREAMING_SNAKE_CASE_=True, SCREAMING_SNAKE_CASE_=None, SCREAMING_SNAKE_CASE_=True, ) -> Union[str, Any]:
UpperCamelCase : str = size if size is not None else {'height': 18, 'width': 18}
UpperCamelCase : int = parent
UpperCamelCase : List[Any] = batch_size
UpperCamelCase : int = num_channels
UpperCamelCase : Any = image_size
UpperCamelCase : Optional[int] = min_resolution
UpperCamelCase : Optional[Any] = max_resolution
UpperCamelCase : Union[str, Any] = do_resize
UpperCamelCase : List[Any] = size
UpperCamelCase : int = do_normalize
def snake_case_ ( self ) -> Tuple:
return {
# here we create 2 clusters for the sake of simplicity
"clusters": np.asarray(
[
[0.88_66_44_36_34_03_32_03, 0.66_18_82_93_69_54_49_83, 0.38_91_74_64_01_78_68_04],
[-0.60_42_55_91_46_88_11_04, -0.0_22_95_00_88_60_52_84_69, 0.54_23_79_73_69_00_32_96],
] ),
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
}
@require_torch
@require_vision
class lowerCAmelCase_ ( a__ , unittest.TestCase ):
UpperCAmelCase__ : Any = ImageGPTImageProcessor if is_vision_available() else None
def snake_case_ ( self ) -> int:
UpperCamelCase : str = ImageGPTImageProcessingTester(self )
@property
def snake_case_ ( self ) -> Optional[Any]:
return self.image_processor_tester.prepare_image_processor_dict()
def snake_case_ ( self ) -> str:
UpperCamelCase : int = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_, 'clusters' ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_, 'do_resize' ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_, 'size' ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_, 'do_normalize' ) )
def snake_case_ ( self ) -> str:
UpperCamelCase : Tuple = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size, {'height': 18, 'width': 18} )
UpperCamelCase : Dict = self.image_processing_class.from_dict(self.image_processor_dict, size=42 )
self.assertEqual(image_processor.size, {'height': 42, 'width': 42} )
def snake_case_ ( self ) -> List[str]:
UpperCamelCase : str = self.image_processing_class(**self.image_processor_dict )
UpperCamelCase : int = json.loads(image_processor.to_json_string() )
for key, value in self.image_processor_dict.items():
if key == "clusters":
self.assertTrue(np.array_equal(SCREAMING_SNAKE_CASE_, obj[key] ) )
else:
self.assertEqual(obj[key], SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ) -> Dict:
UpperCamelCase : Tuple = self.image_processing_class(**self.image_processor_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCamelCase : List[str] = os.path.join(SCREAMING_SNAKE_CASE_, 'image_processor.json' )
image_processor_first.to_json_file(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Tuple = self.image_processing_class.from_json_file(SCREAMING_SNAKE_CASE_ ).to_dict()
UpperCamelCase : List[Any] = image_processor_first.to_dict()
for key, value in image_processor_first.items():
if key == "clusters":
self.assertTrue(np.array_equal(SCREAMING_SNAKE_CASE_, image_processor_second[key] ) )
else:
self.assertEqual(image_processor_first[key], SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ) -> Optional[int]:
UpperCamelCase : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
image_processor_first.save_pretrained(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : str = self.image_processing_class.from_pretrained(SCREAMING_SNAKE_CASE_ ).to_dict()
UpperCamelCase : Union[str, Any] = image_processor_first.to_dict()
for key, value in image_processor_first.items():
if key == "clusters":
self.assertTrue(np.array_equal(SCREAMING_SNAKE_CASE_, image_processor_second[key] ) )
else:
self.assertEqual(image_processor_first[key], SCREAMING_SNAKE_CASE_ )
@unittest.skip('ImageGPT requires clusters at initialization' )
def snake_case_ ( self ) -> str:
pass
def UpperCamelCase ( ) -> int:
UpperCamelCase : Optional[int] = load_dataset('hf-internal-testing/fixtures_image_utils' , split='test' )
UpperCamelCase : int = Image.open(dataset[4]['file'] )
UpperCamelCase : Optional[Any] = Image.open(dataset[5]['file'] )
UpperCamelCase : str = [imagea, imagea]
return images
@require_vision
@require_torch
class lowerCAmelCase_ ( unittest.TestCase ):
@slow
def snake_case_ ( self ) -> str:
UpperCamelCase : List[str] = ImageGPTImageProcessor.from_pretrained('openai/imagegpt-small' )
UpperCamelCase : List[str] = prepare_images()
# test non-batched
UpperCamelCase : int = image_processing(images[0], return_tensors='pt' )
self.assertIsInstance(encoding.input_ids, torch.LongTensor )
self.assertEqual(encoding.input_ids.shape, (1, 1024) )
UpperCamelCase : Union[str, Any] = [306, 191, 191]
self.assertEqual(encoding.input_ids[0, :3].tolist(), SCREAMING_SNAKE_CASE_ )
# test batched
UpperCamelCase : Tuple = image_processing(SCREAMING_SNAKE_CASE_, return_tensors='pt' )
self.assertIsInstance(encoding.input_ids, torch.LongTensor )
self.assertEqual(encoding.input_ids.shape, (2, 1024) )
UpperCamelCase : Optional[Any] = [303, 13, 13]
self.assertEqual(encoding.input_ids[1, -3:].tolist(), SCREAMING_SNAKE_CASE_ )
| 119
| 1
|
'''simple docstring'''
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
_a : Optional[int] = 1E-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class _UpperCAmelCase :
def __init__( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE=16,__SCREAMING_SNAKE_CASE=13,__SCREAMING_SNAKE_CASE=7,__SCREAMING_SNAKE_CASE=14,__SCREAMING_SNAKE_CASE=10,__SCREAMING_SNAKE_CASE=19,__SCREAMING_SNAKE_CASE=5,__SCREAMING_SNAKE_CASE=4,__SCREAMING_SNAKE_CASE=True,__SCREAMING_SNAKE_CASE=16,__SCREAMING_SNAKE_CASE=2,__SCREAMING_SNAKE_CASE=4,__SCREAMING_SNAKE_CASE=4,__SCREAMING_SNAKE_CASE="gelu",__SCREAMING_SNAKE_CASE=0.1,__SCREAMING_SNAKE_CASE=0.1,__SCREAMING_SNAKE_CASE=[1, 2, 3, 4, 5],__SCREAMING_SNAKE_CASE=25,__SCREAMING_SNAKE_CASE=5,):
'''simple docstring'''
__lowerCAmelCase = d_model
__lowerCAmelCase = parent
__lowerCAmelCase = batch_size
__lowerCAmelCase = prediction_length
__lowerCAmelCase = context_length
__lowerCAmelCase = cardinality
__lowerCAmelCase = num_time_features
__lowerCAmelCase = lags_sequence
__lowerCAmelCase = embedding_dimension
__lowerCAmelCase = is_training
__lowerCAmelCase = hidden_size
__lowerCAmelCase = num_hidden_layers
__lowerCAmelCase = num_attention_heads
__lowerCAmelCase = intermediate_size
__lowerCAmelCase = hidden_act
__lowerCAmelCase = hidden_dropout_prob
__lowerCAmelCase = attention_probs_dropout_prob
__lowerCAmelCase = context_length
__lowerCAmelCase = prediction_length + label_length
__lowerCAmelCase = label_length
__lowerCAmelCase = moving_average
__lowerCAmelCase = autocorrelation_factor
def lowerCamelCase__ ( self ):
'''simple docstring'''
return AutoformerConfig(
d_model=self.d_model,encoder_layers=self.num_hidden_layers,decoder_layers=self.num_hidden_layers,encoder_attention_heads=self.num_attention_heads,decoder_attention_heads=self.num_attention_heads,encoder_ffn_dim=self.intermediate_size,decoder_ffn_dim=self.intermediate_size,dropout=self.hidden_dropout_prob,attention_dropout=self.attention_probs_dropout_prob,prediction_length=self.prediction_length,context_length=self.context_length,label_length=self.label_length,lags_sequence=self.lags_sequence,num_time_features=self.num_time_features,num_static_categorical_features=1,cardinality=[self.cardinality],embedding_dimension=[self.embedding_dimension],moving_average=self.moving_average,)
    def prepare_autoformer_inputs_dict(self, config):
        '''simple docstring'''
        _past_length = config.context_length + max(config.lags_sequence)
        static_categorical_features = ids_tensor([self.batch_size, 1], config.cardinality[0])
        past_time_features = floats_tensor([self.batch_size, _past_length, config.num_time_features])
        past_values = floats_tensor([self.batch_size, _past_length])
        past_observed_mask = floats_tensor([self.batch_size, _past_length]) > 0.5
        # decoder inputs
        future_time_features = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features])
        future_values = floats_tensor([self.batch_size, config.prediction_length])
        inputs_dict = {
            "past_values": past_values,
            "static_categorical_features": static_categorical_features,
            "past_time_features": past_time_features,
            "past_observed_mask": past_observed_mask,
            "future_time_features": future_time_features,
            "future_values": future_values,
        }
        return inputs_dict

    def prepare_config_and_inputs(self):
        '''simple docstring'''
        config = self.get_config()
        inputs_dict = self.prepare_autoformer_inputs_dict(config)
        return config, inputs_dict

    def prepare_config_and_inputs_for_common(self):
        '''simple docstring'''
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def check_encoder_decoder_model_standalone(self, config, inputs_dict):
        '''simple docstring'''
        model = AutoformerModel(config=config).to(torch_device).eval()
        outputs = model(**inputs_dict)

        encoder_last_hidden_state = outputs.encoder_last_hidden_state
        last_hidden_state = outputs.last_hidden_state

        with tempfile.TemporaryDirectory() as tmpdirname:
            encoder = model.get_encoder()
            encoder.save_pretrained(tmpdirname)
            encoder = AutoformerEncoder.from_pretrained(tmpdirname).to(torch_device)

        transformer_inputs, feature, _, _, _ = model.create_network_inputs(**inputs_dict)
        seasonal_input, trend_input = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...])
        enc_input = torch.cat(
            (transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]), dim=-1)
        encoder_last_hidden_state_2 = encoder(inputs_embeds=enc_input)[0]
        self.parent.assertTrue((encoder_last_hidden_state_2 - encoder_last_hidden_state).abs().max().item() < 1e-3)

        mean = (
            torch.mean(transformer_inputs[:, : config.context_length, ...], dim=1)
            .unsqueeze(1)
            .repeat(1, config.prediction_length, 1)
        )
        zeros = torch.zeros(
            [transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]], device=enc_input.device)
        dec_input = torch.cat(
            (
                torch.cat((seasonal_input[:, -config.label_length :, ...], zeros), dim=1),
                feature[:, config.context_length - config.label_length :, ...],
            ), dim=-1)
        trend_init = torch.cat(
            (
                torch.cat((trend_input[:, -config.label_length :, ...], mean), dim=1),
                feature[:, config.context_length - config.label_length :, ...],
            ), dim=-1)

        with tempfile.TemporaryDirectory() as tmpdirname:
            decoder = model.get_decoder()
            decoder.save_pretrained(tmpdirname)
            decoder = AutoformerDecoder.from_pretrained(tmpdirname).to(torch_device)

        last_hidden_state_2 = decoder(
            trend=trend_init, inputs_embeds=dec_input, encoder_hidden_states=encoder_last_hidden_state)[0]
        self.parent.assertTrue((last_hidden_state_2 - last_hidden_state).abs().max().item() < 1e-3)
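    # Note on the check above (added): Autoformer splits the context window into a
    # seasonal and a trend component (decomposition_layer), then seeds the decoder
    # with the last `label_length` steps of each, padded with zeros (seasonal) or the
    # context mean (trend) for the `prediction_length` horizon. The two torch.cat
    # calls rebuild exactly those decoder inputs outside the model so the standalone
    # decoder can be compared against the full model.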
@require_torch
class AutoformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
    all_generative_model_classes = (AutoformerForPrediction,) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": AutoformerModel} if is_torch_available() else {}
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False
    test_torchscript = False
    test_inputs_embeds = False
    test_model_common_attributes = False

    def setUp(self):
        '''simple docstring'''
        self.model_tester = AutoformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=AutoformerConfig, has_text_modality=False)
    def test_config(self):
        '''simple docstring'''
        self.config_tester.run_common_tests()

    def test_save_load_strict(self):
        '''simple docstring'''
        config, _ = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            model = model_class(config)
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model_2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True)
            self.assertEqual(info["missing_keys"], [])

    def test_encoder_decoder_model_standalone(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs)
    @unittest.skip(reason="Model has no tokens embeddings")
    def test_resize_tokens_embeddings(self):
        '''simple docstring'''
        pass
    def test_model_main_input_name(self):
        '''simple docstring'''
        model_signature = inspect.signature(getattr(AutoformerModel, "forward"))
        # The main input is the name of the argument after `self`
        observed_main_input_name = list(model_signature.parameters.keys())[1]
        self.assertEqual(AutoformerModel.main_input_name, observed_main_input_name)
    def test_forward_signature(self):
        '''simple docstring'''
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = [
"""past_values""",
"""past_time_features""",
"""past_observed_mask""",
"""static_categorical_features""",
"""static_real_features""",
"""future_values""",
"""future_time_features""",
]
if model.__class__.__name__ in ["AutoformerForPrediction"]:
expected_arg_names.append("""future_observed_mask""" )
expected_arg_names.extend(
[
"""decoder_attention_mask""",
"""head_mask""",
"""decoder_head_mask""",
"""cross_attn_head_mask""",
"""encoder_outputs""",
"""past_key_values""",
"""output_hidden_states""",
"""output_attentions""",
"""use_cache""",
"""return_dict""",
] )
            self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)
    def test_attention_outputs(self):
        '''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        seq_len = getattr(self.model_tester, "seq_length", None)
        decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len)
        d_model = getattr(self.model_tester, "d_model", None)
        num_attention_heads = getattr(self.model_tester, "num_attention_heads", None)
        dim = d_model // num_attention_heads

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.encoder_attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, dim])
            out_len = len(outputs)

            correct_outlen = 7
            if "last_hidden_state" in outputs:
                correct_outlen += 1
            if "trend" in outputs:
                correct_outlen += 1
            if "past_key_values" in outputs:
                correct_outlen += 1  # past_key_values have been returned
            if "loss" in outputs:
                correct_outlen += 1
            if "params" in outputs:
                correct_outlen += 1
            self.assertEqual(out_len, correct_outlen)

            # decoder attentions
            decoder_attentions = outputs.decoder_attentions
            self.assertIsInstance(decoder_attentions, (list, tuple))
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, decoder_seq_length, dim])

            # cross attentions
            cross_attentions = outputs.cross_attentions
            self.assertIsInstance(cross_attentions, (list, tuple))
            self.assertEqual(len(cross_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(cross_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, decoder_seq_length, dim])

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(out_len + 2, len(outputs))
            self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, dim])
@is_flaky()
    def test_retain_grad_hidden_states_attentions(self):
        '''simple docstring'''
        super().test_retain_grad_hidden_states_attentions()
def prepare_batch(filename="train-batch.pt"):
    file = hf_hub_download(repo_id="hf-internal-testing/tourism-monthly-batch", filename=filename, repo_type="dataset")
    batch = torch.load(file, map_location=torch_device)
    return batch
@require_torch
@slow
class AutoformerModelIntegrationTests(unittest.TestCase):
    def test_inference_no_head(self):
        '''simple docstring'''
        model = AutoformerModel.from_pretrained("huggingface/autoformer-tourism-monthly").to(torch_device)
        batch = prepare_batch()
        with torch.no_grad():
            output = model(
                past_values=batch["past_values"],
                past_time_features=batch["past_time_features"],
                past_observed_mask=batch["past_observed_mask"],
                static_categorical_features=batch["static_categorical_features"],
                future_values=batch["future_values"],
                future_time_features=batch["future_time_features"],
            )[0]
        expected_shape = torch.Size(
            (64, model.config.prediction_length + model.config.label_length, model.config.feature_size))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[0.3593, -1.3398, 0.6330], [0.2279, 1.5396, -0.1792], [0.0450, 1.3225, -0.2335]], device=torch_device)
        self.assertTrue(torch.allclose(output[0, :3, :3], expected_slice, atol=TOLERANCE))

    def test_inference_head(self):
        '''simple docstring'''
        model = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly").to(torch_device)
        batch = prepare_batch("val-batch.pt")
        with torch.no_grad():
            output = model(
                past_values=batch["past_values"],
                past_time_features=batch["past_time_features"],
                past_observed_mask=batch["past_observed_mask"],
                static_categorical_features=batch["static_categorical_features"],
            ).encoder_last_hidden_state
        expected_shape = torch.Size((64, model.config.context_length, model.config.d_model))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[-0.0734, -0.9036, 0.8358], [4.7186, 2.4113, 1.9581], [1.7953, 2.3558, 1.2970]], device=torch_device)
        self.assertTrue(torch.allclose(output[0, :3, :3], expected_slice, atol=TOLERANCE))

    def test_seq_to_seq_generation(self):
        '''simple docstring'''
        model = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly").to(torch_device)
        batch = prepare_batch("val-batch.pt")
        with torch.no_grad():
            outputs = model.generate(
                static_categorical_features=batch["static_categorical_features"],
                past_time_features=batch["past_time_features"],
                past_values=batch["past_values"],
                future_time_features=batch["future_time_features"],
                past_observed_mask=batch["past_observed_mask"],
            )
        expected_shape = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length))
        self.assertEqual(outputs.sequences.shape, expected_shape)
        expected_slice = torch.tensor([3130.6763, 4056.5293, 7053.0786], device=torch_device)
        mean_prediction = outputs.sequences.mean(dim=1)
        self.assertTrue(torch.allclose(mean_prediction[0, -3:], expected_slice, rtol=1e-1))
'''simple docstring'''
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. We can also say that there is no edge that connects
# vertices of same set.
def check_bipartite_dfs(graph) -> bool:
    visited = [False] * len(graph)
    color = [-1] * len(graph)

    def dfs(v, c):
        visited[v] = True
        color[v] = c
        for u in graph[v]:
            if not visited[u]:
                dfs(u, 1 - c)

    for i in range(len(graph)):
        if not visited[i]:
            dfs(i, 0)

    for i in range(len(graph)):
        for j in graph[i]:
            if color[i] == color[j]:
                return False

    return True


# Adjacency list of graph
graph = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
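
# Added sanity check (sketch): an odd cycle can never be two-colored, so the checker
# should return False for a triangle.
triangle = {0: [1, 2], 1: [0, 2], 2: [0, 1]}
print(check_bipartite_dfs(triangle))  # False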
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def get_focalnet_config(model_name):
    '''simple docstring'''
    depths = [2, 2, 6, 2] if "tiny" in model_name else [2, 2, 18, 2]
    use_conv_embed = True if "large" in model_name or "huge" in model_name else False
    use_post_layernorm = True if "large" in model_name or "huge" in model_name else False
    use_layerscale = True if "large" in model_name or "huge" in model_name else False

    if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
        if "fl3" in model_name:
            focal_levels = [3, 3, 3, 3]
            focal_windows = [5, 5, 5, 5]
        elif "fl4" in model_name:
            focal_levels = [4, 4, 4, 4]
            focal_windows = [3, 3, 3, 3]

    if "tiny" in model_name or "small" in model_name or "base" in model_name:
        focal_windows = [3, 3, 3, 3]
        if "lrf" in model_name:
            focal_levels = [3, 3, 3, 3]
        else:
            focal_levels = [2, 2, 2, 2]

    if "tiny" in model_name:
        embed_dim = 96
    elif "small" in model_name:
        embed_dim = 96
    elif "base" in model_name:
        embed_dim = 128
    elif "large" in model_name:
        embed_dim = 192
    elif "xlarge" in model_name:
        embed_dim = 256
    elif "huge" in model_name:
        embed_dim = 352

    # set label information
    repo_id = "huggingface/label-files"
    if "large" in model_name or "huge" in model_name:
        filename = "imagenet-22k-id2label.json"
    else:
        filename = "imagenet-1k-id2label.json"

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    config = FocalNetConfig(
        embed_dim=embed_dim,
        depths=depths,
        focal_levels=focal_levels,
        focal_windows=focal_windows,
        use_conv_embed=use_conv_embed,
        id2label=id2label,
        label2id=label2id,
        use_post_layernorm=use_post_layernorm,
        use_layerscale=use_layerscale,
    )
    return config
def rename_key(name):
'''simple docstring'''
if "patch_embed.proj" in name:
a : str = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" )
if "patch_embed.norm" in name:
a : str = name.replace("patch_embed.norm" , "embeddings.norm" )
if "layers" in name:
a : List[Any] = "encoder." + name
if "encoder.layers" in name:
a : Any = name.replace("encoder.layers" , "encoder.stages" )
if "downsample.proj" in name:
a : Optional[Any] = name.replace("downsample.proj" , "downsample.projection" )
if "blocks" in name:
a : List[str] = name.replace("blocks" , "layers" )
if "modulation.f.weight" in name or "modulation.f.bias" in name:
a : Any = name.replace("modulation.f" , "modulation.projection_in" )
if "modulation.h.weight" in name or "modulation.h.bias" in name:
a : Tuple = name.replace("modulation.h" , "modulation.projection_context" )
if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
a : List[Any] = name.replace("modulation.proj" , "modulation.projection_out" )
if name == "norm.weight":
a : Optional[int] = "layernorm.weight"
if name == "norm.bias":
a : Optional[int] = "layernorm.bias"
if "head" in name:
a : Union[str, Any] = name.replace("head" , "classifier" )
else:
a : str = "focalnet." + name
return name
def convert_focalnet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    '''simple docstring'''
    # fmt: off
    model_name_to_url = {
"focalnet-tiny": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth",
"focalnet-tiny-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth",
"focalnet-small": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth",
"focalnet-small-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth",
"focalnet-base": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth",
"focalnet-base-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth",
"focalnet-large-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth",
"focalnet-large-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth",
"focalnet-xlarge-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth",
"focalnet-xlarge-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth",
}
# fmt: on
    checkpoint_url = model_name_to_url[model_name]
    print("Checkpoint URL: ", checkpoint_url)
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]

    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val

    config = get_focalnet_config(model_name)
    model = FocalNetForImageClassification(config)
    model.eval()

    # load state dict
    model.load_state_dict(state_dict)
# verify conversion
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    processor = BitImageProcessor(
        do_resize=True,
        size={"shortest_edge": 256},
        resample=PILImageResampling.BILINEAR,
        do_center_crop=True,
        crop_size=224,
        do_normalize=True,
        image_mean=IMAGENET_DEFAULT_MEAN,
        image_std=IMAGENET_DEFAULT_STD,
    )
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = processor(images=image, return_tensors="pt")

    image_transforms = transforms.Compose(
        [
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ]
    )
    original_pixel_values = image_transforms(image).unsqueeze(0)

    # verify pixel_values
    assert torch.allclose(inputs.pixel_values, original_pixel_values, atol=1e-4)

    outputs = model(**inputs)
    predicted_class_idx = outputs.logits.argmax(-1).item()
    print("Predicted class:", model.config.id2label[predicted_class_idx])
print("First values of logits:" , outputs.logits[0, :3] )
    if model_name == "focalnet-tiny":
        expected_slice = torch.tensor([0.2166, -0.4368, 0.2191])
    elif model_name == "focalnet-tiny-lrf":
        expected_slice = torch.tensor([1.1669, 0.0125, -0.1695])
    elif model_name == "focalnet-small":
        expected_slice = torch.tensor([0.4917, -0.0430, 0.1341])
    elif model_name == "focalnet-small-lrf":
        expected_slice = torch.tensor([-0.2588, -0.5342, -0.2331])
    elif model_name == "focalnet-base":
        expected_slice = torch.tensor([-0.1655, -0.4090, -0.1730])
    elif model_name == "focalnet-base-lrf":
        expected_slice = torch.tensor([0.5306, -0.0483, -0.3928])
    assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f'''Saving model and processor of {model_name} to {pytorch_dump_folder_path}''')
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f'''Pushing model and processor of {model_name} to the hub...''')
        model.push_to_hub(f'''{model_name}''')
        processor.push_to_hub(f'''{model_name}''')
if __name__ == "__main__":
__lowercase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""focalnet-tiny""",
type=str,
help="""Name of the FocalNet model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether to push the model and processor to the hub.""",
)
__lowercase = parser.parse_args()
convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
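# Added usage sketch (hypothetical local path): convert the tiny variant and save it, e.g.
#   python convert_focalnet_to_hf_format.py --model_name focalnet-tiny \
#       --pytorch_dump_folder_path ./focalnet-tiny
# The script downloads the original checkpoint, renames its keys with rename_key,
# and verifies the logits on a COCO image before saving.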
"""simple docstring"""
import itertools
from dataclasses import dataclass
from typing import List, Optional
import pyarrow as pa
import pyarrow.parquet as pq
import datasets
from datasets.table import table_cast
__lowercase = datasets.utils.logging.get_logger(__name__)
@dataclass
class ParquetConfig(datasets.BuilderConfig):
    """simple docstring"""

    batch_size: int = 10_000
    columns: Optional[List[str]] = None
    features: Optional[datasets.Features] = None


class Parquet(datasets.ArrowBasedBuilder):
    """simple docstring"""

    BUILDER_CONFIG_CLASS = ParquetConfig
    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            # Infer features if they are stored in the arrow schema
            if self.info.features is None:
                for file in itertools.chain.from_iterable(files):
                    with open(file, "rb") as f:
                        self.info.features = datasets.Features.from_arrow_schema(pq.read_schema(f))
                    break
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits
    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.info.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.info.features.arrow_schema)
        return pa_table
    def _generate_tables(self, files):
        schema = self.info.features.arrow_schema if self.info.features is not None else None
        if self.info.features is not None and self.config.columns is not None:
            if sorted(field.name for field in schema) != sorted(self.config.columns):
                raise ValueError(
                    f"Tried to load parquet data with columns '{self.config.columns}' with mismatching features '{self.info.features}'")
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            with open(file, "rb") as f:
                parquet_file = pq.ParquetFile(f)
                try:
                    for batch_idx, record_batch in enumerate(
                        parquet_file.iter_batches(batch_size=self.config.batch_size, columns=self.config.columns)):
                        pa_table = pa.Table.from_batches([record_batch])
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield f"{file_idx}_{batch_idx}", self._cast_table(pa_table)
                except ValueError as e:
                    logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                    raise
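
if __name__ == "__main__":
    # Added demo (sketch): the same pyarrow.parquet batched-read pattern that
    # _generate_tables uses above, run against a small throwaway file.
    import os
    import tempfile

    with tempfile.TemporaryDirectory() as tmpdir:
        path = os.path.join(tmpdir, "demo.parquet")
        pq.write_table(pa.table({"a": list(range(25))}), path)
        parquet_file = pq.ParquetFile(path)
        for batch_idx, record_batch in enumerate(parquet_file.iter_batches(batch_size=10)):
            print(batch_idx, pa.Table.from_batches([record_batch]).num_rows)  # 10, 10, 5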
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DetrConfig, DetrForObjectDetection, DetrForSegmentation, DetrImageProcessor, ResNetConfig
from transformers.utils import logging
logging.set_verbosity_info()
UpperCamelCase_ = logging.get_logger(__name__)
def get_detr_config(model_name):
    """simple docstring"""
    if "resnet-50" in model_name:
        backbone_config = ResNetConfig.from_pretrained("microsoft/resnet-50")
    elif "resnet-101" in model_name:
        backbone_config = ResNetConfig.from_pretrained("microsoft/resnet-101")
    else:
        raise ValueError("Model name should include either resnet50 or resnet101")

    config = DetrConfig(use_timm_backbone=False, backbone_config=backbone_config)

    # set label attributes
    is_panoptic = "panoptic" in model_name
    if is_panoptic:
        config.num_labels = 250
    else:
        config.num_labels = 91
    repo_id = "huggingface/label-files"
    filename = "coco-detection-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config, is_panoptic
def create_rename_keys(config):
    """simple docstring"""
    rename_keys = []
# stem
# fmt: off
rename_keys.append(("backbone.0.body.conv1.weight", "backbone.conv_encoder.model.embedder.embedder.convolution.weight") )
rename_keys.append(("backbone.0.body.bn1.weight", "backbone.conv_encoder.model.embedder.embedder.normalization.weight") )
rename_keys.append(("backbone.0.body.bn1.bias", "backbone.conv_encoder.model.embedder.embedder.normalization.bias") )
rename_keys.append(("backbone.0.body.bn1.running_mean", "backbone.conv_encoder.model.embedder.embedder.normalization.running_mean") )
rename_keys.append(("backbone.0.body.bn1.running_var", "backbone.conv_encoder.model.embedder.embedder.normalization.running_var") )
# stages
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
# shortcut
if layer_idx == 0:
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.0.weight''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.convolution.weight''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.weight''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.weight''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.bias''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.bias''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_mean''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_mean''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_var''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_var''',
) )
# 3 convs
for i in range(3 ):
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.conv{i+1}.weight''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.convolution.weight''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.weight''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.weight''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.bias''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.bias''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_mean''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_mean''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_var''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_var''',
) )
# fmt: on
for i in range(config.encoder_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(
F'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''',
F'''encoder.layers.{i}.self_attn.out_proj.weight''',
) )
rename_keys.append(
(F'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', F'''encoder.layers.{i}.self_attn.out_proj.bias''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.weight''', F'''encoder.layers.{i}.fc1.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.bias''', F'''encoder.layers.{i}.fc1.bias''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.weight''', F'''encoder.layers.{i}.fc2.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.bias''', F'''encoder.layers.{i}.fc2.bias''') )
rename_keys.append(
(F'''transformer.encoder.layers.{i}.norm1.weight''', F'''encoder.layers.{i}.self_attn_layer_norm.weight''') )
rename_keys.append(
(F'''transformer.encoder.layers.{i}.norm1.bias''', F'''encoder.layers.{i}.self_attn_layer_norm.bias''') )
rename_keys.append(
(F'''transformer.encoder.layers.{i}.norm2.weight''', F'''encoder.layers.{i}.final_layer_norm.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.bias''', F'''encoder.layers.{i}.final_layer_norm.bias''') )
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''',
F'''decoder.layers.{i}.self_attn.out_proj.weight''',
) )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', F'''decoder.layers.{i}.self_attn.out_proj.bias''') )
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.multihead_attn.out_proj.weight''',
F'''decoder.layers.{i}.encoder_attn.out_proj.weight''',
) )
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.multihead_attn.out_proj.bias''',
F'''decoder.layers.{i}.encoder_attn.out_proj.bias''',
) )
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.weight''', F'''decoder.layers.{i}.fc1.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.bias''', F'''decoder.layers.{i}.fc1.bias''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.weight''', F'''decoder.layers.{i}.fc2.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.bias''', F'''decoder.layers.{i}.fc2.bias''') )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm1.weight''', F'''decoder.layers.{i}.self_attn_layer_norm.weight''') )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm1.bias''', F'''decoder.layers.{i}.self_attn_layer_norm.bias''') )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.weight''', F'''decoder.layers.{i}.encoder_attn_layer_norm.weight''') )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.bias''', F'''decoder.layers.{i}.encoder_attn_layer_norm.bias''') )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm3.weight''', F'''decoder.layers.{i}.final_layer_norm.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.bias''', F'''decoder.layers.{i}.final_layer_norm.bias''') )
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
("input_proj.weight", "input_projection.weight"),
("input_proj.bias", "input_projection.bias"),
("query_embed.weight", "query_position_embeddings.weight"),
("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
("class_embed.weight", "class_labels_classifier.weight"),
("class_embed.bias", "class_labels_classifier.bias"),
("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
] )
return rename_keys
def rename_key(state_dict, old, new):
    """simple docstring"""
    val = state_dict.pop(old)
    state_dict[new] = val
def read_in_q_k_v(state_dict, is_panoptic=False):
    """simple docstring"""
    prefix = ""
    if is_panoptic:
        prefix = "detr."

    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight")
        in_proj_bias_cross_attn = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.weight"] = in_proj_weight_cross_attn[:256, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.bias"] = in_proj_bias_cross_attn[:256]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.weight"] = in_proj_weight_cross_attn[256:512, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.bias"] = in_proj_bias_cross_attn[256:512]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.weight"] = in_proj_weight_cross_attn[-256:, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.bias"] = in_proj_bias_cross_attn[-256:]
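# Added note (sketch): the [:256] / [256:512] / [-256:] row slices above work because
# PyTorch's nn.MultiheadAttention stores the q, k and v projections stacked in a single
# (3 * hidden_size, hidden_size) in_proj matrix, and DETR uses hidden_size = 256:
#   in_proj_weight = torch.randn(3 * 256, 256)
#   q_w, k_w, v_w = in_proj_weight[:256, :], in_proj_weight[256:512, :], in_proj_weight[-256:, :]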
def prepare_img():
    """simple docstring"""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_detr_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    """simple docstring"""
    config, is_panoptic = get_detr_config(model_name)

    # load original model from torch hub
    model_name_to_original_name = {
        "detr-resnet-50": "detr_resnet50",
        "detr-resnet-101": "detr_resnet101",
    }
    logger.info(f"Converting model {model_name}...")
    detr = torch.hub.load("facebookresearch/detr", model_name_to_original_name[model_name], pretrained=True).eval()
    state_dict = detr.state_dict()
    # rename keys
    for src, dest in create_rename_keys(config):
        if is_panoptic:
            src = "detr." + src
        rename_key(state_dict, src, dest)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict, is_panoptic=is_panoptic)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "detr.model." if is_panoptic else "model."
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith("detr")
                and not key.startswith("class_labels_classifier")
                and not key.startswith("bbox_predictor")
            ):
                val = state_dict.pop(key)
                state_dict["detr." + key] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                val = state_dict.pop(key)
                state_dict["detr." + key] = val
            elif key.startswith("bbox_attention") or key.startswith("mask_head"):
                continue
            else:
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
        else:
            if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
    # finally, create HuggingFace model and load state dict
    model = DetrForSegmentation(config) if is_panoptic else DetrForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    # verify our conversion on an image
    format = "coco_panoptic" if is_panoptic else "coco_detection"
    processor = DetrImageProcessor(format=format)
    encoding = processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]

    original_outputs = detr(pixel_values)
    outputs = model(pixel_values)

    assert torch.allclose(outputs.logits, original_outputs["pred_logits"], atol=1e-3)
    assert torch.allclose(outputs.pred_boxes, original_outputs["pred_boxes"], atol=1e-3)
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks, original_outputs["pred_masks"], atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Upload model and image processor to the hub
        logger.info("Uploading PyTorch model and image processor to the hub...")
        model.push_to_hub(f"nielsr/{model_name}")
        processor.push_to_hub(f"nielsr/{model_name}")
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
default='detr-resnet-50',
type=str,
choices=['detr-resnet-50', 'detr-resnet-101'],
help='Name of the DETR model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
parser.add_argument('--push_to_hub', action='store_true', help='Whether to push the model to the hub or not.')
UpperCamelCase_ = parser.parse_args()
convert_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
"""simple docstring"""
import numpy as np
import torch
from imwatermark import WatermarkEncoder
# Copied from https://github.com/Stability-AI/generative-models/blob/613af104c6b85184091d42d374fef420eddb356d/scripts/demo/streamlit_helpers.py#L66
WATERMARK_MESSAGE = 0b10_11_00_11_11_10_11_00_10_01_00_00_01_11_10_11_10_11_00_01_10_01_11_10
# bin(x)[2:] gives bits of x as str, use int to convert them to 0/1
WATERMARK_BITS = [int(bit) for bit in bin(WATERMARK_MESSAGE)[2:]]
class StableDiffusionXLWatermarker:
    def __init__(self) -> None:
        self.watermark = WATERMARK_BITS
        self.encoder = WatermarkEncoder()
        self.encoder.set_watermark("bits", self.watermark)

    def apply_watermark(self, images: torch.FloatTensor):
        # can't encode images that are smaller than 256
        if images.shape[-1] < 256:
            return images
        images = (255 * (images / 2 + 0.5)).cpu().permute(0, 2, 3, 1).float().numpy()
        images = [self.encoder.encode(image, "dwtDct") for image in images]
        images = torch.from_numpy(np.array(images)).permute(0, 3, 1, 2)
        images = torch.clamp(2 * (images / 255 - 0.5), min=-1.0, max=1.0)
        return images
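# Added usage sketch (hypothetical driver, not part of the original file):
#   watermarker = StableDiffusionXLWatermarker()
#   images = torch.zeros(2, 3, 512, 512)   # batch of images in [-1, 1]
#   marked = watermarker.apply_watermark(images)
# Images narrower than 256 px are returned unchanged by the early exit above.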
'''simple docstring'''
from __future__ import annotations
def slowsort(sequence: list, start: int | None = None, end: int | None = None) -> None:
    if start is None:
        start = 0
    if end is None:
        end = len(sequence) - 1
    if start >= end:
        return
    mid = (start + end) // 2
    slowsort(sequence, start, mid)
    slowsort(sequence, mid + 1, end)
    if sequence[end] < sequence[mid]:
        sequence[end], sequence[mid] = sequence[mid], sequence[end]
    slowsort(sequence, start, end - 1)
if __name__ == "__main__":
from doctest import testmod
testmod()
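
if __name__ == "__main__":
    # Added demo (sketch): slowsort sorts the list in place.
    example = [5, 3, 1, 4, 2]
    slowsort(example)
    print(example)  # [1, 2, 3, 4, 5]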
'''simple docstring'''
import argparse
import numpy as np
import torch
from transformers import SpeechT5HifiGan, SpeechT5HifiGanConfig, logging
logging.set_verbosity_info()
lowercase : List[str] = logging.get_logger("transformers.models.speecht5")
def load_weights(checkpoint, hf_model, config):
    hf_model.apply_weight_norm()

    hf_model.conv_pre.weight_g.data = checkpoint['input_conv.weight_g']
    hf_model.conv_pre.weight_v.data = checkpoint['input_conv.weight_v']
    hf_model.conv_pre.bias.data = checkpoint['input_conv.bias']

    for i in range(len(config.upsample_rates)):
        hf_model.upsampler[i].weight_g.data = checkpoint[f'upsamples.{i}.1.weight_g']
        hf_model.upsampler[i].weight_v.data = checkpoint[f'upsamples.{i}.1.weight_v']
        hf_model.upsampler[i].bias.data = checkpoint[f'upsamples.{i}.1.bias']

    for i in range(len(config.upsample_rates) * len(config.resblock_kernel_sizes)):
        for j in range(len(config.resblock_dilation_sizes)):
            hf_model.resblocks[i].convs1[j].weight_g.data = checkpoint[f'blocks.{i}.convs1.{j}.1.weight_g']
            hf_model.resblocks[i].convs1[j].weight_v.data = checkpoint[f'blocks.{i}.convs1.{j}.1.weight_v']
            hf_model.resblocks[i].convs1[j].bias.data = checkpoint[f'blocks.{i}.convs1.{j}.1.bias']
            hf_model.resblocks[i].convs2[j].weight_g.data = checkpoint[f'blocks.{i}.convs2.{j}.1.weight_g']
            hf_model.resblocks[i].convs2[j].weight_v.data = checkpoint[f'blocks.{i}.convs2.{j}.1.weight_v']
            hf_model.resblocks[i].convs2[j].bias.data = checkpoint[f'blocks.{i}.convs2.{j}.1.bias']

    hf_model.conv_post.weight_g.data = checkpoint['output_conv.1.weight_g']
    hf_model.conv_post.weight_v.data = checkpoint['output_conv.1.weight_v']
    hf_model.conv_post.bias.data = checkpoint['output_conv.1.bias']

    hf_model.remove_weight_norm()
@torch.no_grad()
def convert_hifigan_checkpoint(checkpoint_path, stats_path, pytorch_dump_folder_path, config_path=None, repo_id=None):
    if config_path is not None:
        config = SpeechT5HifiGanConfig.from_pretrained(config_path)
    else:
        config = SpeechT5HifiGanConfig()

    model = SpeechT5HifiGan(config)

    orig_checkpoint = torch.load(checkpoint_path)
    load_weights(orig_checkpoint['model']['generator'], model, config)

    stats = np.load(stats_path)
    mean = stats[0].reshape(-1)
    scale = stats[1].reshape(-1)
    model.mean = torch.from_numpy(mean).float()
    model.scale = torch.from_numpy(scale).float()

    model.save_pretrained(pytorch_dump_folder_path)
    if repo_id:
        print('Pushing to the hub...')
        model.push_to_hub(repo_id)
if __name__ == "__main__":
lowercase : Dict = argparse.ArgumentParser()
parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint")
parser.add_argument("--stats_path", required=True, default=None, type=str, help="Path to stats.npy file")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
lowercase : List[Any] = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
"""simple docstring"""
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine"):
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f'''Unsupported alpha_transform_type: {alpha_transform_type}''')
    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
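# Added note (sketch): for the default cosine transform this computes
#   beta_i = 1 - alpha_bar((i + 1) / T) / alpha_bar(i / T), clipped at max_beta,
# so e.g. betas_for_alpha_bar(4) returns four monotonically increasing betas.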
class KDPM2DiscreteScheduler(SchedulerMixin, ConfigMixin):
    _compatibles = [e.name for e in KarrasDiffusionSchedulers]
    order = 2

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        beta_start: float = 0.00085,
        beta_end: float = 0.012,
        beta_schedule: str = "linear",
        trained_betas=None,
        prediction_type: str = "epsilon",
        timestep_spacing: str = "linspace",
        steps_offset: int = 0,
    ) -> None:
        '''simple docstring'''
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas, dtype=torch.float32)
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps)
        else:
            raise NotImplementedError(f'''{beta_schedule} is not implemented for {self.__class__}''')
        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)
        # set all values
        self.set_timesteps(num_train_timesteps, None, num_train_timesteps)
    def index_for_timestep(self, timestep, schedule_timesteps=None):
        '''simple docstring'''
        if schedule_timesteps is None:
            schedule_timesteps = self.timesteps
        indices = (schedule_timesteps == timestep).nonzero()
        # The sigma index that is taken for the **very** first `step`
        # is always the second index (or the last index if there is only 1)
        # This way we can ensure we don't accidentally skip a sigma in
        # case we start in the middle of the denoising schedule (e.g. for image-to-image)
        if len(self._index_counter) == 0:
            pos = 1 if len(indices) > 1 else 0
        else:
            timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
            pos = self._index_counter[timestep_int]
        return indices[pos].item()
@property
    def init_noise_sigma(self):
        '''simple docstring'''
        if self.config.timestep_spacing in ["linspace", "trailing"]:
            return self.sigmas.max()
        return (self.sigmas.max() ** 2 + 1) ** 0.5
    def scale_model_input(self, sample, timestep) -> torch.FloatTensor:
        '''simple docstring'''
        step_index = self.index_for_timestep(timestep)
        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
        else:
            sigma = self.sigmas_interpol[step_index]
        sample = sample / ((sigma**2 + 1) ** 0.5)
        return sample
    def set_timesteps(self, num_inference_steps, device=None, num_train_timesteps=None):
        '''simple docstring'''
        self.num_inference_steps = num_inference_steps
        num_train_timesteps = num_train_timesteps or self.config.num_train_timesteps
        # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
        if self.config.timestep_spacing == "linspace":
            timesteps = np.linspace(0, num_train_timesteps - 1, num_inference_steps, dtype=float)[::-1].copy()
        elif self.config.timestep_spacing == "leading":
            step_ratio = num_train_timesteps // self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(float)
            timesteps += self.config.steps_offset
        elif self.config.timestep_spacing == "trailing":
            step_ratio = num_train_timesteps / self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(num_train_timesteps, 0, -step_ratio)).round().copy().astype(float)
            timesteps -= 1
        else:
            raise ValueError(
                f'''{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'.''')
        sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5)
        self.log_sigmas = torch.from_numpy(np.log(sigmas)).to(device)
        sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas)
        sigmas = np.concatenate([sigmas, [0.0]]).astype(np.float32)
        sigmas = torch.from_numpy(sigmas).to(device=device)
        # interpolate sigmas
        sigmas_interpol = sigmas.log().lerp(sigmas.roll(1).log(), 0.5).exp()
        self.sigmas = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2), sigmas[-1:]])
        self.sigmas_interpol = torch.cat(
            [sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2), sigmas_interpol[-1:]])
        if str(device).startswith('mps'):
            # mps does not support float64
            timesteps = torch.from_numpy(timesteps).to(device, dtype=torch.float32)
        else:
            timesteps = torch.from_numpy(timesteps).to(device)
        # interpolate timesteps
        timesteps_interpol = self.sigma_to_t(sigmas_interpol).to(device, dtype=timesteps.dtype)
        interleaved_timesteps = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]), dim=-1).flatten()
        self.timesteps = torch.cat([timesteps[:1], interleaved_timesteps])
        self.sample = None
        # for exp beta schedules, such as the one for `pipeline_shap_e.py`
        # we need an index counter
        self._index_counter = defaultdict(int)
    def sigma_to_t(self, sigma):
        '''simple docstring'''
        # get log sigma
        log_sigma = sigma.log()
        # get distribution
        dists = log_sigma - self.log_sigmas[:, None]
        # get sigmas range
        low_idx = dists.ge(0).cumsum(dim=0).argmax(dim=0).clamp(max=self.log_sigmas.shape[0] - 2)
        high_idx = low_idx + 1
        low = self.log_sigmas[low_idx]
        high = self.log_sigmas[high_idx]
        # interpolate sigmas
        w = (low - log_sigma) / (low - high)
        w = w.clamp(0, 1)
        # transform interpolation to time range
        t = (1 - w) * low_idx + w * high_idx
        t = t.view(sigma.shape)
        return t
@property
    def state_in_first_order(self):
        '''simple docstring'''
        return self.sample is None
    def step(self, model_output, timestep, sample, return_dict=True) -> Union[SchedulerOutput, Tuple]:
        '''simple docstring'''
        step_index = self.index_for_timestep(timestep)
        # advance index counter by 1
        timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
        self._index_counter[timestep_int] += 1
        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
            sigma_interpol = self.sigmas_interpol[step_index + 1]
            sigma_next = self.sigmas[step_index + 1]
        else:
            # 2nd order / KDPM2's method
            sigma = self.sigmas[step_index - 1]
            sigma_interpol = self.sigmas_interpol[step_index]
            sigma_next = self.sigmas[step_index]
        # currently only gamma=0 is supported. This usually works best anyways.
        # We can support gamma in the future but then need to scale the timestep before
        # passing it to the model which requires a change in API
        gamma = 0
        sigma_hat = sigma * (gamma + 1)  # Note: sigma_hat == sigma for now
        # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
        if self.config.prediction_type == "epsilon":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_interpol
            pred_original_sample = sample - sigma_input * model_output
        elif self.config.prediction_type == "v_prediction":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_interpol
            pred_original_sample = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
                sample / (sigma_input**2 + 1)
            )
        elif self.config.prediction_type == "sample":
            raise NotImplementedError('prediction_type not implemented yet: sample')
        else:
            raise ValueError(
                f'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`''')
        if self.state_in_first_order:
            # 2. Convert to an ODE derivative for 1st order
            derivative = (sample - pred_original_sample) / sigma_hat
            # 3. delta timestep
            dt = sigma_interpol - sigma_hat
            # store for 2nd order step
            self.sample = sample
        else:
            # DPM-Solver-2
            # 2. Convert to an ODE derivative for 2nd order
            derivative = (sample - pred_original_sample) / sigma_interpol
            # 3. delta timestep
            dt = sigma_next - sigma_hat
            sample = self.sample
            self.sample = None
        prev_sample = sample + derivative * dt
        if not return_dict:
            return (prev_sample,)
        return SchedulerOutput(prev_sample=prev_sample)
    def add_noise(self, original_samples, noise, timesteps) -> torch.FloatTensor:
        '''simple docstring'''
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype)
        if original_samples.device.type == "mps" and torch.is_floating_point(timesteps):
            # mps does not support float64
            schedule_timesteps = self.timesteps.to(original_samples.device, dtype=torch.float32)
            timesteps = timesteps.to(original_samples.device, dtype=torch.float32)
        else:
            schedule_timesteps = self.timesteps.to(original_samples.device)
            timesteps = timesteps.to(original_samples.device)
        step_indices = [self.index_for_timestep(t, schedule_timesteps) for t in timesteps]
        sigma = sigmas[step_indices].flatten()
        while len(sigma.shape) < len(original_samples.shape):
            sigma = sigma.unsqueeze(-1)
        noisy_samples = original_samples + noise * sigma
        return noisy_samples
def __len__( self ) -> Optional[Any]:
'''simple docstring'''
return self.config.num_train_timesteps
"""simple docstring"""
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
pkgs_to_check_at_runtime = "python tqdm regex requests packaging filelock numpy tokenizers".split()
if sys.version_info < (3, 7):
pkgs_to_check_at_runtime.append("dataclasses")
if sys.version_info < (3, 8):
pkgs_to_check_at_runtime.append("importlib_metadata")
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(F"can't find {pkg} in {deps.keys()}, check dependency_versions_table.py")
def dep_version_check(pkg, hint=None):
    require_version(deps[pkg], hint)
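# A minimal usage sketch (illustrative only, not part of the original file): the helper
# above re-checks a single pinned dependency at run time, while the loop above checks the
# whole core list at import. The requirement strings here are assumptions, not values
# taken from the deps table:
#     dep_version_check("numpy")        # checks the pin stored in deps["numpy"]
#     require_version("tqdm>=4.27")     # raises with a helpful message if the pin is unmet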
| 321
| 1
|
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
lowerCAmelCase__ : Optional[Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
class snake_case ( __UpperCAmelCase ):
"""simple docstring"""
def __init__( self : Any ,lowerCamelCase__ : int ,lowerCamelCase__ : List[str] ):
super().__init__()
self.register_modules(unet=lowerCamelCase__ ,scheduler=lowerCamelCase__ )
@torch.no_grad()
def __call__( self : Any ,lowerCamelCase__ : int = 1 ,lowerCamelCase__ : int = 100 ,lowerCamelCase__ : Optional[Union[torch.Generator, List[torch.Generator]]] = None ,lowerCamelCase__ : Optional[float] = None ,lowerCamelCase__ : bool = True ,):
if audio_length_in_s is None:
UpperCAmelCase__ = self.unet.config.sample_size / self.unet.config.sample_rate
UpperCAmelCase__ = audio_length_in_s * self.unet.config.sample_rate
UpperCAmelCase__ = 2 ** len(self.unet.up_blocks )
if sample_size < 3 * down_scale_factor:
raise ValueError(
f'''{audio_length_in_s} is too small. Make sure it\'s bigger or equal to'''
f''' {3 * down_scale_factor / self.unet.config.sample_rate}.''' )
UpperCAmelCase__ = int(lowerCamelCase__ )
if sample_size % down_scale_factor != 0:
UpperCAmelCase__ = (
(audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
) * down_scale_factor
logger.info(
f'''{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled'''
f''' by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising'''
' process.' )
UpperCAmelCase__ = int(lowerCamelCase__ )
UpperCAmelCase__ = next(iter(self.unet.parameters() ) ).dtype
UpperCAmelCase__ = (batch_size, self.unet.config.in_channels, sample_size)
if isinstance(lowerCamelCase__ ,lowerCamelCase__ ) and len(lowerCamelCase__ ) != batch_size:
raise ValueError(
f'''You have passed a list of generators of length {len(lowerCamelCase__ )}, but requested an effective batch'''
f''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' )
UpperCAmelCase__ = randn_tensor(lowerCamelCase__ ,generator=lowerCamelCase__ ,device=self.device ,dtype=lowerCamelCase__ )
# set step values
self.scheduler.set_timesteps(lowerCamelCase__ ,device=audio.device )
UpperCAmelCase__ = self.scheduler.timesteps.to(lowerCamelCase__ )
for t in self.progress_bar(self.scheduler.timesteps ):
# 1. predict noise model_output
UpperCAmelCase__ = self.unet(lowerCamelCase__ ,lowerCamelCase__ ).sample
# 2. compute previous image: x_t -> t_t-1
UpperCAmelCase__ = self.scheduler.step(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ).prev_sample
UpperCAmelCase__ = audio.clamp(-1 ,1 ).float().cpu().numpy()
UpperCAmelCase__ = audio[:, :, :original_sample_size]
if not return_dict:
return (audio,)
return AudioPipelineOutput(audios=lowerCamelCase__ )
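# A hedged usage sketch for the audio pipeline above (the checkpoint id is an assumption
# for illustration, not something this file defines):
#     pipe = DiffusionPipeline.from_pretrained("harmonai/maestro-150k").to("cuda")
#     output = pipe(audio_length_in_s=4.0, num_inference_steps=100)
#     waveform = output.audios[0]  # numpy array, truncated to the original sample length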
| 98
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ : int = logging.get_logger(__name__)
lowerCAmelCase__ : str = {
'facebook/xglm-564M': 'https://huggingface.co/facebook/xglm-564M/resolve/main/config.json',
# See all XGLM models at https://huggingface.co/models?filter=xglm
}
class snake_case ( __UpperCAmelCase ):
"""simple docstring"""
snake_case__ = "xglm"
snake_case__ = ["past_key_values"]
snake_case__ = {
"num_attention_heads": "attention_heads",
"hidden_size": "d_model",
"num_hidden_layers": "num_layers",
}
def __init__( self : Any ,lowerCamelCase__ : Any=256_008 ,lowerCamelCase__ : Optional[Any]=2_048 ,lowerCamelCase__ : List[str]=1_024 ,lowerCamelCase__ : List[str]=4_096 ,lowerCamelCase__ : Tuple=24 ,lowerCamelCase__ : Optional[int]=16 ,lowerCamelCase__ : int="gelu" ,lowerCamelCase__ : Dict=0.1 ,lowerCamelCase__ : int=0.1 ,lowerCamelCase__ : List[Any]=0.0 ,lowerCamelCase__ : List[str]=0.0 ,lowerCamelCase__ : Optional[Any]=0.0_2 ,lowerCamelCase__ : List[str]=True ,lowerCamelCase__ : Optional[Any]=True ,lowerCamelCase__ : str=2 ,lowerCamelCase__ : Dict=1 ,lowerCamelCase__ : Optional[int]=0 ,lowerCamelCase__ : Tuple=2 ,**lowerCamelCase__ : List[Any] ,):
UpperCAmelCase__ = vocab_size
UpperCAmelCase__ = max_position_embeddings
UpperCAmelCase__ = d_model
UpperCAmelCase__ = ffn_dim
UpperCAmelCase__ = num_layers
UpperCAmelCase__ = attention_heads
UpperCAmelCase__ = activation_function
UpperCAmelCase__ = dropout
UpperCAmelCase__ = attention_dropout
UpperCAmelCase__ = activation_dropout
UpperCAmelCase__ = layerdrop
UpperCAmelCase__ = init_std
UpperCAmelCase__ = scale_embedding # scale factor will be sqrt(d_model) if True
UpperCAmelCase__ = use_cache
super().__init__(
pad_token_id=lowerCamelCase__ ,bos_token_id=lowerCamelCase__ ,eos_token_id=lowerCamelCase__ ,decoder_start_token_id=lowerCamelCase__ ,**lowerCamelCase__ ,)
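# A small sketch of how `attribute_map` above behaves (illustrative; the keyword names
# are assumptions matching the map's targets rather than the obfuscated parameter names):
#     cfg = snake_case(d_model=1_024, attention_heads=16)   # the config class defined above
#     cfg.hidden_size           # -> 1_024, read through attribute_map as `d_model`
#     cfg.num_attention_heads   # -> 16, read through attribute_map as `attention_heads`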
| 98
| 1
|
"""simple docstring"""
from sklearn.metrics import f1_score
import datasets
_DESCRIPTION = '\nThe F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:\nF1 = 2 * (precision * recall) / (precision + recall)\n'
_KWARGS_DESCRIPTION = '\nArgs:\n predictions (`list` of `int`): Predicted labels.\n references (`list` of `int`): Ground truth labels.\n labels (`list` of `int`): The set of labels to include when `average` is not set to `\'binary\'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.\n pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.\n average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`.\n\n - \'binary\': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.\n - \'micro\': Calculate metrics globally by counting the total true positives, false negatives and false positives.\n - \'macro\': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n - \'weighted\': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. This option can result in an F-score that is not between precision and recall.\n - \'samples\': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n sample_weight (`list` of `float`): Sample weights Defaults to None.\n\nReturns:\n f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better.\n\nExamples:\n\n Example 1-A simple binary example\n >>> f1_metric = datasets.load_metric("f1")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])\n >>> print(results)\n {\'f1\': 0.5}\n\n Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.\n >>> f1_metric = datasets.load_metric("f1")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)\n >>> print(round(results[\'f1\'], 2))\n 0.67\n\n Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.\n >>> f1_metric = datasets.load_metric("f1")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])\n >>> print(round(results[\'f1\'], 2))\n 0.35\n\n Example 4-A multiclass example, with different values for the `average` input.\n >>> predictions = [0, 2, 1, 0, 0, 1]\n >>> references = [0, 1, 2, 0, 1, 2]\n >>> results = f1_metric.compute(predictions=predictions, references=references, average="macro")\n >>> print(round(results[\'f1\'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average="micro")\n >>> print(round(results[\'f1\'], 2))\n 0.33\n >>> results = f1_metric.compute(predictions=predictions, references=references, average="weighted")\n >>> print(round(results[\'f1\'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=None)\n >>> print(results)\n {\'f1\': array([0.8, 0. , 0. ])}\n'
_CITATION = '\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _SCREAMING_SNAKE_CASE( datasets.Metric ):
def _UpperCamelCase ( self ) -> Dict:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
'''predictions''': datasets.Sequence(datasets.Value('''int32''' ) ),
'''references''': datasets.Sequence(datasets.Value('''int32''' ) ),
}
if self.config_name == '''multilabel'''
else {
'''predictions''': datasets.Value('''int32''' ),
'''references''': datasets.Value('''int32''' ),
} ) ,reference_urls=['''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html'''] ,)
    def _UpperCamelCase ( self ,predictions ,references ,labels=None ,pos_label=1 ,average="binary" ,sample_weight=None ) -> Optional[int]:
        """simple docstring"""
        score = f1_score(
            references ,predictions ,labels=labels ,pos_label=pos_label ,average=average ,sample_weight=sample_weight )
        return {"f1": float(score ) if score.size == 1 else score}
| 363
|
"""simple docstring"""
import argparse
import torch
from transformers import (
UniSpeechSatConfig,
UniSpeechSatForAudioFrameClassification,
UniSpeechSatForSequenceClassification,
UniSpeechSatForXVector,
WavaVecaFeatureExtractor,
logging,
)
logging.set_verbosity_info()
lowerCamelCase_ = logging.get_logger(__name__)
def __lowerCamelCase ( a_ : Optional[int] , a_ : Optional[int] , a_ : Any ) -> List[Any]:
__SCREAMING_SNAKE_CASE :Any = UniSpeechSatForSequenceClassification.from_pretrained(a_ , config=a_ )
__SCREAMING_SNAKE_CASE :int = downstream_dict['''projector.weight''']
__SCREAMING_SNAKE_CASE :List[Any] = downstream_dict['''projector.bias''']
__SCREAMING_SNAKE_CASE :Union[str, Any] = downstream_dict['''model.post_net.linear.weight''']
__SCREAMING_SNAKE_CASE :List[str] = downstream_dict['''model.post_net.linear.bias''']
return model
def __lowerCamelCase ( a_ : Union[str, Any] , a_ : List[Any] , a_ : List[Any] ) -> Optional[int]:
__SCREAMING_SNAKE_CASE :Any = UniSpeechSatForAudioFrameClassification.from_pretrained(a_ , config=a_ )
__SCREAMING_SNAKE_CASE :List[str] = downstream_dict['''model.linear.weight''']
__SCREAMING_SNAKE_CASE :Union[str, Any] = downstream_dict['''model.linear.bias''']
return model
def __lowerCamelCase ( a_ : Optional[int] , a_ : Optional[Any] , a_ : int ) -> List[str]:
__SCREAMING_SNAKE_CASE :List[str] = UniSpeechSatForXVector.from_pretrained(a_ , config=a_ )
__SCREAMING_SNAKE_CASE :Optional[int] = downstream_dict['''connector.weight''']
__SCREAMING_SNAKE_CASE :Tuple = downstream_dict['''connector.bias''']
for i, kernel_size in enumerate(hf_config.tdnn_kernel ):
__SCREAMING_SNAKE_CASE :str = downstream_dict[
f'''model.framelevel_feature_extractor.module.{i}.kernel.weight'''
]
__SCREAMING_SNAKE_CASE :int = downstream_dict[f'''model.framelevel_feature_extractor.module.{i}.kernel.bias''']
__SCREAMING_SNAKE_CASE :Any = downstream_dict['''model.utterancelevel_feature_extractor.linear1.weight''']
__SCREAMING_SNAKE_CASE :Optional[Any] = downstream_dict['''model.utterancelevel_feature_extractor.linear1.bias''']
__SCREAMING_SNAKE_CASE :Dict = downstream_dict['''model.utterancelevel_feature_extractor.linear2.weight''']
__SCREAMING_SNAKE_CASE :Optional[int] = downstream_dict['''model.utterancelevel_feature_extractor.linear2.bias''']
__SCREAMING_SNAKE_CASE :str = downstream_dict['''objective.W''']
return model
@torch.no_grad()
def __lowerCamelCase ( a_ : Optional[int] , a_ : Union[str, Any] , a_ : Any , a_ : Union[str, Any] ) -> List[str]:
__SCREAMING_SNAKE_CASE :str = torch.load(a_ , map_location='''cpu''' )
__SCREAMING_SNAKE_CASE :str = checkpoint['''Downstream''']
__SCREAMING_SNAKE_CASE :str = UniSpeechSatConfig.from_pretrained(a_ )
__SCREAMING_SNAKE_CASE :List[str] = WavaVecaFeatureExtractor.from_pretrained(
a_ , return_attention_mask=a_ , do_normalize=a_ )
__SCREAMING_SNAKE_CASE :Optional[Any] = hf_config.architectures[0]
if arch.endswith('''ForSequenceClassification''' ):
__SCREAMING_SNAKE_CASE :str = convert_classification(a_ , a_ , a_ )
elif arch.endswith('''ForAudioFrameClassification''' ):
__SCREAMING_SNAKE_CASE :Tuple = convert_diarization(a_ , a_ , a_ )
elif arch.endswith('''ForXVector''' ):
__SCREAMING_SNAKE_CASE :List[Any] = convert_xvector(a_ , a_ , a_ )
else:
raise NotImplementedError(f'''S3PRL weights conversion is not supported for {arch}''' )
if hf_config.use_weighted_layer_sum:
__SCREAMING_SNAKE_CASE :Dict = checkpoint['''Featurizer''']['''weights''']
hf_feature_extractor.save_pretrained(a_ )
hf_model.save_pretrained(a_ )
if __name__ == "__main__":
lowerCamelCase_ = argparse.ArgumentParser()
parser.add_argument(
"--base_model_name", default=None, type=str, help="Name of the huggingface pretrained base model."
)
parser.add_argument("--config_path", default=None, type=str, help="Path to the huggingface classifier config.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to the s3prl checkpoint.")
parser.add_argument("--model_dump_path", default=None, type=str, help="Path to the final converted model.")
lowerCamelCase_ = parser.parse_args()
convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
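# Example invocation (every path and model name below is a placeholder, not a value
# taken from this script):
#     python convert_unispeech_sat_s3prl.py \
#         --base_model_name microsoft/unispeech-sat-base \
#         --config_path ./config.json \
#         --checkpoint_path ./s3prl_checkpoint.ckpt \
#         --model_dump_path ./converted_model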
| 239
| 0
|
import unittest
from transformers import SqueezeBertConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
)
class UpperCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self : Optional[int] ,A : Optional[Any] ,A : Optional[int]=13 ,A : List[str]=7 ,A : List[str]=True ,A : str=True ,A : int=False ,A : List[Any]=True ,A : Tuple=99 ,A : Optional[int]=32 ,A : Optional[Any]=5 ,A : List[Any]=4 ,A : Optional[Any]=64 ,A : List[Any]="gelu" ,A : List[Any]=0.1 ,A : Union[str, Any]=0.1 ,A : Union[str, Any]=5_12 ,A : Union[str, Any]=16 ,A : Optional[Any]=2 ,A : str=0.02 ,A : int=3 ,A : List[str]=4 ,A : List[str]=None ,A : Union[str, Any]=2 ,A : str=2 ,A : Dict=2 ,A : Optional[Any]=2 ,A : Any=4 ,A : Optional[Any]=1 ,):
__A = parent
__A = batch_size
__A = seq_length
__A = is_training
__A = use_input_mask
__A = use_token_type_ids
__A = use_labels
__A = vocab_size
__A = hidden_size
__A = num_hidden_layers
__A = num_attention_heads
__A = intermediate_size
__A = hidden_act
__A = hidden_dropout_prob
__A = attention_probs_dropout_prob
__A = max_position_embeddings
__A = type_vocab_size
__A = type_sequence_label_size
__A = initializer_range
__A = num_labels
__A = num_choices
__A = scope
__A = q_groups
__A = k_groups
__A = v_groups
__A = post_attention_groups
__A = intermediate_groups
__A = output_groups
def UpperCamelCase_ ( self : List[str] ):
__A = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
__A = None
if self.use_input_mask:
__A = random_attention_mask([self.batch_size, self.seq_length] )
__A = None
__A = None
__A = None
if self.use_labels:
__A = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
__A = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
__A = ids_tensor([self.batch_size] ,self.num_choices )
__A = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase_ ( self : str ):
return SqueezeBertConfig(
embedding_size=self.hidden_size ,vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,attention_probs_dropout_prob=self.hidden_dropout_prob ,attention_dropout=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,initializer_range=self.initializer_range ,q_groups=self.q_groups ,k_groups=self.k_groups ,v_groups=self.v_groups ,post_attention_groups=self.post_attention_groups ,intermediate_groups=self.intermediate_groups ,output_groups=self.output_groups ,)
def UpperCamelCase_ ( self : int ,A : List[str] ,A : Any ,A : int ,A : str ,A : int ,A : str ):
__A = SqueezeBertModel(config=A )
model.to(A )
model.eval()
__A = model(A ,A )
__A = model(A )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase_ ( self : str ,A : Any ,A : Any ,A : Optional[int] ,A : int ,A : int ,A : Union[str, Any] ):
__A = SqueezeBertForMaskedLM(config=A )
model.to(A )
model.eval()
__A = model(A ,attention_mask=A ,labels=A )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase_ ( self : List[str] ,A : Optional[int] ,A : List[Any] ,A : Union[str, Any] ,A : Optional[Any] ,A : Dict ,A : int ):
__A = SqueezeBertForQuestionAnswering(config=A )
model.to(A )
model.eval()
__A = model(
A ,attention_mask=A ,start_positions=A ,end_positions=A )
self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) )
def UpperCamelCase_ ( self : Optional[int] ,A : Optional[Any] ,A : List[Any] ,A : Tuple ,A : Dict ,A : Optional[int] ,A : Dict ):
__A = self.num_labels
__A = SqueezeBertForSequenceClassification(A )
model.to(A )
model.eval()
__A = model(A ,attention_mask=A ,labels=A )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def UpperCamelCase_ ( self : Dict ,A : Tuple ,A : int ,A : Any ,A : Union[str, Any] ,A : Union[str, Any] ,A : Any ):
__A = self.num_labels
__A = SqueezeBertForTokenClassification(config=A )
model.to(A )
model.eval()
__A = model(A ,attention_mask=A ,labels=A )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase_ ( self : Tuple ,A : Optional[Any] ,A : int ,A : Optional[Any] ,A : Tuple ,A : List[Any] ,A : Tuple ):
__A = self.num_choices
__A = SqueezeBertForMultipleChoice(config=A )
model.to(A )
model.eval()
__A = input_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
__A = input_mask.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
__A = model(
A ,attention_mask=A ,labels=A ,)
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_choices) )
def UpperCamelCase_ ( self : Any ):
__A = self.prepare_config_and_inputs()
((__A) , (__A) , (__A) , (__A) , (__A) , (__A)) = config_and_inputs
__A = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class UpperCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
snake_case_ = (
(
SqueezeBertModel,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
)
if is_torch_available()
else None
)
snake_case_ = (
{
"feature-extraction": SqueezeBertModel,
"fill-mask": SqueezeBertForMaskedLM,
"question-answering": SqueezeBertForQuestionAnswering,
"text-classification": SqueezeBertForSequenceClassification,
"token-classification": SqueezeBertForTokenClassification,
"zero-shot": SqueezeBertForSequenceClassification,
}
if is_torch_available()
else {}
)
snake_case_ = False
snake_case_ = True
snake_case_ = False
def UpperCamelCase_ ( self : str ):
__A = SqueezeBertModelTester(self )
__A = ConfigTester(self ,config_class=A ,dim=37 )
def UpperCamelCase_ ( self : Union[str, Any] ):
self.config_tester.run_common_tests()
def UpperCamelCase_ ( self : Dict ):
__A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_model(*A )
def UpperCamelCase_ ( self : Optional[Any] ):
__A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_masked_lm(*A )
def UpperCamelCase_ ( self : int ):
__A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_question_answering(*A )
def UpperCamelCase_ ( self : Dict ):
__A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_sequence_classification(*A )
def UpperCamelCase_ ( self : Optional[int] ):
__A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_token_classification(*A )
def UpperCamelCase_ ( self : List[str] ):
__A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_multiple_choice(*A )
@slow
def UpperCamelCase_ ( self : List[str] ):
for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__A = SqueezeBertModel.from_pretrained(A )
self.assertIsNotNone(A )
@require_sentencepiece
@require_tokenizers
@require_torch
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def UpperCamelCase_ ( self : int ):
__A = SqueezeBertForSequenceClassification.from_pretrained("squeezebert/squeezebert-mnli" )
__A = torch.tensor([[1, 2_94_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 13, 15_88, 2]] )
__A = model(A )[0]
__A = torch.Size((1, 3) )
self.assertEqual(output.shape ,A )
__A = torch.tensor([[0.64_01, -0.03_49, -0.60_41]] )
self.assertTrue(torch.allclose(A ,A ,atol=1E-4 ) )
| 15
|
def heaps(arr: list) -> list:
    """simple docstring"""
    if len(arr) <= 1:
        return [tuple(arr)]
    res = []

    def generate(k: int, arr: list):
        if k == 1:
            res.append(tuple(arr[:]))
            return
        generate(k - 1, arr)
        for i in range(k - 1):
            if k % 2 == 0:  # k is even
                arr[i], arr[k - 1] = arr[k - 1], arr[i]
            else:  # k is odd
                arr[0], arr[k - 1] = arr[k - 1], arr[0]
            generate(k - 1, arr)

    generate(len(arr), arr)
    return res
if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    arr = [int(item) for item in user_input.split(',')]
print(heaps(arr))
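# Worked example (illustrative, not part of the original file): for a 3-element list,
# Heap's algorithm emits all 3! = 6 permutations, each produced from the previous one
# by a single swap:
#     heaps([1, 2, 3])
#     # -> [(1, 2, 3), (2, 1, 3), (3, 1, 2), (1, 3, 2), (2, 3, 1), (3, 2, 1)]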
| 15
| 1
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
snake_case__ : Union[str, Any] = logging.get_logger(__name__)
def _a ( lowerCamelCase: str ) -> YolosConfig:
'''simple docstring'''
__A = YolosConfig()
# size of the architecture
if "yolos_ti" in yolos_name:
__A = 1_92
__A = 7_68
__A = 12
__A = 3
__A = [8_00, 13_33]
__A = False
elif yolos_name == "yolos_s_dWr":
__A = 3_30
__A = 14
__A = 6
__A = 13_20
elif "yolos_s" in yolos_name:
__A = 3_84
__A = 15_36
__A = 12
__A = 6
elif "yolos_b" in yolos_name:
__A = [8_00, 13_44]
__A = 91
__A = '''huggingface/label-files'''
__A = '''coco-detection-id2label.json'''
__A = json.load(open(hf_hub_download(lowerCamelCase , lowerCamelCase , repo_type='''dataset''' ) , '''r''' ) )
__A = {int(lowerCamelCase ): v for k, v in idalabel.items()}
__A = idalabel
__A = {v: k for k, v in idalabel.items()}
return config
def _a ( lowerCamelCase: dict , lowerCamelCase: YolosConfig , lowerCamelCase: bool = False ) -> int:
'''simple docstring'''
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
__A = state_dict.pop(F"""blocks.{i}.attn.qkv.weight""" )
__A = state_dict.pop(F"""blocks.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
__A = in_proj_weight[: config.hidden_size, :]
__A = in_proj_bias[: config.hidden_size]
__A = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
__A = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
__A = in_proj_weight[-config.hidden_size :, :]
__A = in_proj_bias[-config.hidden_size :]
def _a ( lowerCamelCase: str ) -> str:
'''simple docstring'''
if "backbone" in name:
__A = name.replace('''backbone''' , '''vit''' )
if "cls_token" in name:
__A = name.replace('''cls_token''' , '''embeddings.cls_token''' )
if "det_token" in name:
__A = name.replace('''det_token''' , '''embeddings.detection_tokens''' )
if "mid_pos_embed" in name:
__A = name.replace('''mid_pos_embed''' , '''encoder.mid_position_embeddings''' )
if "pos_embed" in name:
__A = name.replace('''pos_embed''' , '''embeddings.position_embeddings''' )
if "patch_embed.proj" in name:
__A = name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' )
if "blocks" in name:
__A = name.replace('''blocks''' , '''encoder.layer''' )
if "attn.proj" in name:
__A = name.replace('''attn.proj''' , '''attention.output.dense''' )
if "attn" in name:
__A = name.replace('''attn''' , '''attention.self''' )
if "norm1" in name:
__A = name.replace('''norm1''' , '''layernorm_before''' )
if "norm2" in name:
__A = name.replace('''norm2''' , '''layernorm_after''' )
if "mlp.fc1" in name:
__A = name.replace('''mlp.fc1''' , '''intermediate.dense''' )
if "mlp.fc2" in name:
__A = name.replace('''mlp.fc2''' , '''output.dense''' )
if "class_embed" in name:
__A = name.replace('''class_embed''' , '''class_labels_classifier''' )
if "bbox_embed" in name:
__A = name.replace('''bbox_embed''' , '''bbox_predictor''' )
if "vit.norm" in name:
__A = name.replace('''vit.norm''' , '''vit.layernorm''' )
return name
def _a ( lowerCamelCase: dict , lowerCamelCase: YolosForObjectDetection ) -> dict:
'''simple docstring'''
for key in orig_state_dict.copy().keys():
__A = orig_state_dict.pop(lowerCamelCase )
if "qkv" in key:
__A = key.split('''.''' )
__A = int(key_split[2] )
__A = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
if "weight" in key:
__A = val[:dim, :]
__A = val[
dim : dim * 2, :
]
__A = val[-dim:, :]
else:
__A = val[:dim]
__A = val[dim : dim * 2]
__A = val[-dim:]
else:
__A = val
return orig_state_dict
def _a ( ) -> torch.Tensor:
'''simple docstring'''
__A = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
__A = Image.open(requests.get(lowerCamelCase , stream=lowerCamelCase ).raw )
return im
@torch.no_grad()
def _a ( lowerCamelCase: str , lowerCamelCase: str , lowerCamelCase: str , lowerCamelCase: bool = False ) -> List[str]:
'''simple docstring'''
__A = get_yolos_config(lowerCamelCase )
# load original state_dict
__A = torch.load(lowerCamelCase , map_location='''cpu''' )['''model''']
# load 🤗 model
__A = YolosForObjectDetection(lowerCamelCase )
model.eval()
__A = convert_state_dict(lowerCamelCase , lowerCamelCase )
model.load_state_dict(lowerCamelCase )
# Check outputs on an image, prepared by YolosImageProcessor
__A = 8_00 if yolos_name != '''yolos_ti''' else 5_12
__A = YolosImageProcessor(format='''coco_detection''' , size=lowerCamelCase )
__A = image_processor(images=prepare_img() , return_tensors='''pt''' )
__A = model(**lowerCamelCase )
__A , __A = outputs.logits, outputs.pred_boxes
__A , __A = None, None
if yolos_name == "yolos_ti":
__A = torch.tensor(
[[-39.5_022, -11.9_820, -17.6_888], [-29.9_574, -9.9_769, -17.7_691], [-42.3_281, -20.7_200, -30.6_294]] )
__A = torch.tensor(
[[0.4_021, 0.0_836, 0.7_979], [0.0_184, 0.2_609, 0.0_364], [0.1_781, 0.2_004, 0.2_095]] )
elif yolos_name == "yolos_s_200_pre":
__A = torch.tensor(
[[-24.0_248, -10.3_024, -14.8_290], [-42.0_392, -16.8_200, -27.4_334], [-27.2_743, -11.8_154, -18.7_148]] )
__A = torch.tensor(
[[0.2_559, 0.5_455, 0.4_706], [0.2_989, 0.7_279, 0.1_875], [0.7_732, 0.4_017, 0.4_462]] )
elif yolos_name == "yolos_s_300_pre":
__A = torch.tensor(
[[-36.2_220, -14.4_385, -23.5_457], [-35.6_970, -14.7_583, -21.3_935], [-31.5_939, -13.6_042, -16.8_049]] )
__A = torch.tensor(
[[0.7_614, 0.2_316, 0.4_728], [0.7_168, 0.4_495, 0.3_855], [0.4_996, 0.1_466, 0.9_996]] )
elif yolos_name == "yolos_s_dWr":
__A = torch.tensor(
[[-42.8_668, -24.1_049, -41.1_690], [-34.7_456, -14.1_274, -24.9_194], [-33.7_898, -12.1_946, -25.6_495]] )
__A = torch.tensor(
[[0.5_587, 0.2_773, 0.0_605], [0.5_004, 0.3_014, 0.9_994], [0.4_999, 0.1_548, 0.9_994]] )
elif yolos_name == "yolos_base":
__A = torch.tensor(
[[-40.6_064, -24.3_084, -32.6_447], [-55.1_990, -30.7_719, -35.5_877], [-51.4_311, -33.3_507, -35.6_462]] )
__A = torch.tensor(
[[0.5_555, 0.2_794, 0.0_655], [0.9_049, 0.2_664, 0.1_894], [0.9_183, 0.1_984, 0.1_635]] )
else:
raise ValueError(F"""Unknown yolos_name: {yolos_name}""" )
assert torch.allclose(logits[0, :3, :3] , lowerCamelCase , atol=1e-4 )
assert torch.allclose(pred_boxes[0, :3, :3] , lowerCamelCase , atol=1e-4 )
Path(lowerCamelCase ).mkdir(exist_ok=lowerCamelCase )
print(F"""Saving model {yolos_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(lowerCamelCase )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(lowerCamelCase )
if push_to_hub:
__A = {
'''yolos_ti''': '''yolos-tiny''',
'''yolos_s_200_pre''': '''yolos-small''',
'''yolos_s_300_pre''': '''yolos-small-300''',
'''yolos_s_dWr''': '''yolos-small-dwr''',
'''yolos_base''': '''yolos-base''',
}
print('''Pushing to the hub...''' )
__A = model_mapping[yolos_name]
image_processor.push_to_hub(lowerCamelCase , organization='''hustvl''' )
model.push_to_hub(lowerCamelCase , organization='''hustvl''' )
if __name__ == "__main__":
snake_case__ : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--yolos_name',
default='yolos_s_200_pre',
type=str,
help=(
'Name of the YOLOS model you\'d like to convert. Should be one of \'yolos_ti\', \'yolos_s_200_pre\','
' \'yolos_s_300_pre\', \'yolos_s_dWr\', \'yolos_base\'.'
),
)
parser.add_argument(
'--checkpoint_path', default=None, type=str, help='Path to the original state dict (.pth file).'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
snake_case__ : Any = parser.parse_args()
convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
| 354
|
def is_ip_va_address_valid(ip_va_address: str) -> bool:
    '''simple docstring'''
    octets = [int(i) for i in ip_va_address.split('.') if i.isdigit()]
    return len(octets) == 4 and all(0 <= octet <= 255 for octet in octets)
if __name__ == "__main__":
    ip = input().strip()
    valid_or_invalid = 'valid' if is_ip_va_address_valid(ip) else 'invalid'
print(f'{ip} is a {valid_or_invalid} IP v4 address.')
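# A few illustrative checks (assumed outputs, not part of the original file):
#     is_ip_va_address_valid("192.168.0.23")  # True
#     is_ip_va_address_valid("256.1.2.3")     # False - first octet exceeds 255
#     is_ip_va_address_valid("192.168.01")    # False - only three octets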
| 250
| 0
|
def gray_code_sequence(bit_count: int) -> list:
    """simple docstring"""
    if bit_count < 0:
        raise ValueError("The given input must be positive")
    # get the generated string sequence
    sequence = gray_code_sequence_string(bit_count)
    #
    # convert them to integers
    for i in range(len(sequence)):
        sequence[i] = int(sequence[i], 2)
    return sequence


def gray_code_sequence_string(bit_count: int) -> list:
    """simple docstring"""
    if bit_count == 0:
        return ["0"]
    if bit_count == 1:
        return ["0", "1"]
    seq_len = 1 << bit_count  # defines the length of the sequence
    # 1 << n is equivalent to 2^n
    # recursive answer will generate answer for n-1 bits
    smaller_sequence = gray_code_sequence_string(bit_count - 1)
    sequence = []
    # append 0 to first half of the smaller sequence generated
    for i in range(seq_len // 2):
        sequence.append("0" + smaller_sequence[i])
    # append 1 to second half ... start from the end of the list
    for i in reversed(range(seq_len // 2)):
        sequence.append("1" + smaller_sequence[i])
    return sequence
if __name__ == "__main__":
import doctest
doctest.testmod()
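# Worked example (illustrative, not part of the original file): for bit_count = 2 the
# string sequence is ["00", "01", "11", "10"], so
#     gray_code_sequence(2)  # -> [0, 1, 3, 2]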
| 219
|
MOD_ADLER = 6_5521


def adler32(plain_text: str) -> int:
    """simple docstring"""
    a = 1
    b = 0
    for plain_chr in plain_text:
        a = (a + ord(plain_chr)) % MOD_ADLER
        b = (b + a) % MOD_ADLER
    return (b << 16) | a
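# Worked example (hand-checked, not part of the original file):
#     adler32("Wikipedia")  # -> 300286872 == 0x11E60398
# The running sums finish at a = 920 and b = 4582, and (4582 << 16) | 920 == 300286872.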
| 219
| 1
|
'''simple docstring'''
from collections.abc import Sequence
def evaluate_poly(poly: Sequence[float], x: float) -> float:
    return sum(c * (x**i) for i, c in enumerate(poly))


def horner(poly: Sequence[float], x: float) -> float:
    result = 0.0
    for coeff in reversed(poly):
        result = result * x + coeff
    return result
if __name__ == "__main__":
    poly = (0.0, 0.0, 5.0, 9.3, 7.0)
    x = 10.0
print(evaluate_poly(poly, x))
print(horner(poly, x))
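# Both prints above output 79800.0: with coefficients (0.0, 0.0, 5.0, 9.3, 7.0) the
# polynomial is 5*x^2 + 9.3*x^3 + 7*x^4, and at x = 10 that is 500 + 9300 + 70000 = 79800.
# Horner's rule reaches the same value with one multiply-add per coefficient:
#     ((((7.0) * 10 + 9.3) * 10 + 5.0) * 10 + 0.0) * 10 + 0.0  # -> 79800.0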
| 367
|
'''simple docstring'''
from __future__ import annotations
from typing import Any
def evaluate_postfix(postfix_notation: list) -> int:
    if not postfix_notation:
        return 0
    operations = {"+", "-", "*", "/"}
    stack: list[Any] = []
    for token in postfix_notation:
        if token in operations:
            b, a = stack.pop(), stack.pop()
if token == "+":
stack.append(a + b )
elif token == "-":
stack.append(a - b )
elif token == "*":
stack.append(a * b )
else:
if a * b < 0 and a % b != 0:
stack.append(a // b + 1 )
else:
stack.append(a // b )
else:
            stack.append(int(token))
return stack.pop()
if __name__ == "__main__":
import doctest
doctest.testmod()
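# Worked example (illustrative, not part of the original file): "(2 + 1) * 3" in postfix:
#     evaluate_postfix(["2", "1", "+", "3", "*"])  # -> 9
# The division branch truncates toward zero for mixed signs, e.g. ["-7", "2", "/"] -> -3.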
| 21
| 0
|
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import sys
import transformers
lowerCAmelCase__ = """3"""
print("""Python version:""", sys.version)
print("""transformers version:""", transformers.__version__)
try:
import torch
print("""Torch version:""", torch.__version__)
print("""Cuda available:""", torch.cuda.is_available())
print("""Cuda version:""", torch.version.cuda)
print("""CuDNN version:""", torch.backends.cudnn.version())
print("""Number of GPUs available:""", torch.cuda.device_count())
print("""NCCL version:""", torch.cuda.nccl.version())
except ImportError:
print("""Torch version:""", None)
try:
import deepspeed
print("""DeepSpeed version:""", deepspeed.__version__)
except ImportError:
print("""DeepSpeed version:""", None)
try:
import tensorflow as tf
print("""TensorFlow version:""", tf.__version__)
print("""TF GPUs available:""", bool(tf.config.list_physical_devices("""GPU""")))
print("""Number of TF GPUs available:""", len(tf.config.list_physical_devices("""GPU""")))
except ImportError:
print("""TensorFlow version:""", None)
| 68
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {'''vocab_file''': '''sentencepiece.bpe.model'''}
lowerCAmelCase__ = {
'''vocab_file''': {
'''moussaKam/mbarthez''': '''https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model''',
'''moussaKam/barthez''': '''https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model''',
'''moussaKam/barthez-orangesum-title''': (
'''https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model'''
),
},
}
lowerCAmelCase__ = {
'''moussaKam/mbarthez''': 1_024,
'''moussaKam/barthez''': 1_024,
'''moussaKam/barthez-orangesum-title''': 1_024,
}
lowerCAmelCase__ = '''▁'''
class _lowerCamelCase ( _lowercase ):
UpperCAmelCase_ = VOCAB_FILES_NAMES
UpperCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase_ = ["input_ids", "attention_mask"]
def __init__(self , __a , __a="<s>" , __a="</s>" , __a="</s>" , __a="<s>" , __a="<unk>" , __a="<pad>" , __a="<mask>" , __a = None , **__a , ) -> None:
# Mask token behave like a normal word, i.e. include the space before it
UpperCamelCase = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else mask_token
UpperCamelCase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=__a , eos_token=__a , unk_token=__a , sep_token=__a , cls_token=__a , pad_token=__a , mask_token=__a , sp_model_kwargs=self.sp_model_kwargs , **__a , )
UpperCamelCase = vocab_file
UpperCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(__a ) )
UpperCamelCase = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
UpperCamelCase = len(self.sp_model ) - 1
UpperCamelCase = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def snake_case_ (self , __a , __a = None ) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
UpperCamelCase = [self.cls_token_id]
UpperCamelCase = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def snake_case_ (self , __a , __a = None , __a = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__a , token_ids_a=__a , already_has_special_tokens=__a )
if token_ids_a is None:
return [1] + ([0] * len(__a )) + [1]
return [1] + ([0] * len(__a )) + [1, 1] + ([0] * len(__a )) + [1]
def snake_case_ (self , __a , __a = None ) -> List[int]:
UpperCamelCase = [self.sep_token_id]
UpperCamelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def snake_case_ (self ) -> Any:
return len(self.sp_model )
def snake_case_ (self ) -> int:
UpperCamelCase = {self.convert_ids_to_tokens(__a ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def snake_case_ (self , __a ) -> List[str]:
return self.sp_model.encode(__a , out_type=__a )
def snake_case_ (self , __a ) -> int:
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
UpperCamelCase = self.sp_model.PieceToId(__a )
return spm_id if spm_id else self.unk_token_id
def snake_case_ (self , __a ) -> List[Any]:
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(__a )
def snake_case_ (self , __a ) -> Union[str, Any]:
UpperCamelCase = []
UpperCamelCase = ""
UpperCamelCase = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(__a ) + token
UpperCamelCase = True
UpperCamelCase = []
else:
current_sub_tokens.append(__a )
UpperCamelCase = False
out_string += self.sp_model.decode(__a )
return out_string.strip()
def __getstate__(self ) -> str:
UpperCamelCase = self.__dict__.copy()
UpperCamelCase = None
return state
def __setstate__(self , __a ) -> Optional[int]:
UpperCamelCase = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
UpperCamelCase = {}
UpperCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def snake_case_ (self , __a , __a = None ) -> Tuple[str]:
if not os.path.isdir(__a ):
logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
return
UpperCamelCase = os.path.join(
__a , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__a ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __a )
elif not os.path.isfile(self.vocab_file ):
with open(__a , "wb" ) as fi:
UpperCamelCase = self.sp_model.serialized_model_proto()
fi.write(__a )
return (out_vocab_file,)
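# Sketch of the special-token layouts the methods above produce (illustrative comments,
# not part of the original file):
#     single sequence:   <s> A </s>               -> [cls] + token_ids_a + [sep]
#     pair of sequences: <s> A </s></s> B </s>    -> cls + A + sep + sep + B + sep
# create_token_type_ids_from_sequences returns all zeros for both layouts, matching
# the code above.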
| 153
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase : Optional[int] = logging.get_logger(__name__)
lowercase : str = {
"""SCUT-DLVCLab/lilt-roberta-en-base""": (
"""https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json"""
),
}
class __snake_case ( lowerCAmelCase ):
_a : Optional[int]= "lilt"
def __init__( self ,snake_case=30522 ,snake_case=768 ,snake_case=12 ,snake_case=12 ,snake_case=3072 ,snake_case="gelu" ,snake_case=0.1 ,snake_case=0.1 ,snake_case=512 ,snake_case=2 ,snake_case=0.02 ,snake_case=1e-12 ,snake_case=0 ,snake_case="absolute" ,snake_case=None ,snake_case=4 ,snake_case=1024 ,**snake_case ,):
'''simple docstring'''
super().__init__(pad_token_id=snake_case ,**snake_case )
lowercase : str = vocab_size
lowercase : Tuple = hidden_size
lowercase : int = num_hidden_layers
lowercase : Optional[Any] = num_attention_heads
lowercase : Tuple = hidden_act
lowercase : List[str] = intermediate_size
lowercase : List[str] = hidden_dropout_prob
lowercase : Optional[Any] = attention_probs_dropout_prob
lowercase : List[str] = max_position_embeddings
lowercase : Dict = type_vocab_size
lowercase : Any = initializer_range
lowercase : str = layer_norm_eps
lowercase : Optional[Any] = position_embedding_type
lowercase : List[str] = classifier_dropout
lowercase : List[str] = channel_shrink_ratio
lowercase : Optional[Any] = max_ad_position_embeddings
| 285
|
from collections.abc import Generator
def fibonacci_generator() -> Generator[int, None, None]:
    a, b = 0, 1
    while True:
        a, b = b, a + b
        yield b


def solution(n: int = 1_000) -> int:
    answer = 1
    gen = fibonacci_generator()
    while len(str(next(gen))) < n:
        answer += 1
    return answer + 1
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
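# Worked example (illustrative, not part of the original file): the first Fibonacci term
# with 3 digits is F(12) = 144, so
#     solution(3)  # -> 12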
| 285
| 1
|
def least_divisible_repunit(divisor: int) -> int:
    '''simple docstring'''
    if divisor % 5 == 0 or divisor % 2 == 0:
        return 0
    repunit = 1
    repunit_index = 1
    while repunit:
        repunit = (10 * repunit + 1) % divisor
        repunit_index += 1
    return repunit_index


def solution(limit: int = 1_000_000) -> int:
    '''simple docstring'''
    divisor = limit - 1
    if divisor % 2 == 0:
        divisor += 1
    while least_divisible_repunit(divisor) <= limit:
        divisor += 2
    return divisor
if __name__ == "__main__":
print(F'''{solution() = }''')
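# Worked example (illustrative, not part of the original file): R(6) = 111111 = 7 * 15873,
# and no shorter repunit is divisible by 7, so
#     least_divisible_repunit(7)  # -> 6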
| 214
|
from __future__ import annotations
Matrix = list[list[int]]
# assigning initial values to the grid
initial_grid: Matrix = [
[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
no_solution: Matrix = [
[5, 0, 6, 5, 0, 8, 4, 0, 3],
[5, 2, 0, 0, 0, 0, 0, 0, 2],
[1, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def is_safe(grid: Matrix, row: int, column: int, n: int) -> bool:
    """simple docstring"""
    for i in range(9):
        if grid[row][i] == n or grid[i][column] == n:
            return False
    for i in range(3):
        for j in range(3):
            if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
                return False
    return True


def find_empty_location(grid: Matrix) -> tuple[int, int] | None:
    """simple docstring"""
    for i in range(9):
        for j in range(9):
            if grid[i][j] == 0:
                return i, j
    return None


def sudoku(grid: Matrix) -> Matrix | None:
    """simple docstring"""
    if location := find_empty_location(grid):
        row, column = location
    else:
        # If the location is ``None``, then the grid is solved.
        return grid
    for digit in range(1, 10):
        if is_safe(grid, row, column, digit):
            grid[row][column] = digit
            if sudoku(grid) is not None:
                return grid
            grid[row][column] = 0
    return None


def print_solution(grid: Matrix) -> None:
    """simple docstring"""
    for row in grid:
        for cell in row:
            print(cell, end=" ")
        print()
if __name__ == "__main__":
# make a copy of grid so that you can compare with the unmodified grid
for example_grid in (initial_grid, no_solution):
print("\nExample grid:\n" + "=" * 20)
print_solution(example_grid)
print("\nExample grid solution:")
        solution = sudoku(example_grid)
if solution is not None:
print_solution(solution)
else:
print("Cannot find a solution.")
| 227
| 0
|
import math
def res(x, y):
    if 0 not in (x, y):
        # We use the relation log10(x^y) = y * log10(x), where 10 is the base.
        return y * math.log10(x)
    else:
        if x == 0:  # 0 raised to any number is 0
            return 0
        elif y == 0:
            return 1  # any number raised to 0 is 1
    raise AssertionError("This should never happen")
if __name__ == "__main__": # Main function
# Read two numbers from input and typecast them to int using map function.
# Here x is the base and y is the power.
    prompt = "Enter the base and the power separated by a comma: "
    x1, y1 = map(int, input(prompt).split(","))
    x2, y2 = map(int, input(prompt).split(","))
    # We find the log of each number, using the function res(), which takes two
    # arguments.
    res1 = res(x1, y1)
    res2 = res(x2, y2)
    # We check for the largest number
    if res1 > res2:
        print("Largest number is", x1, "^", y1)
    elif res2 > res1:
        print("Largest number is", x2, "^", y2)
    else:
        print("Both are equal")
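# Worked example (illustrative, not part of the original file): comparing 3^4 and 4^3
# without computing either power:
#     res(3, 4)  # -> 4 * log10(3) ~= 1.908
#     res(4, 3)  # -> 3 * log10(4) ~= 1.806
# so 3^4 (= 81) is the larger, matching direct evaluation.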
| 369
|
"""simple docstring"""
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
__UpperCamelCase : Union[str, Any] = logging.get_logger(__name__)
__UpperCamelCase : List[str] = {
'''vocab_file''': '''vocab.json''',
'''merges_file''': '''merges.txt''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
__UpperCamelCase : Tuple = {
'''vocab_file''': {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'''
},
'''merges_file''': {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'''
},
'''tokenizer_config_file''': {
'''facebook/blenderbot_small-90M''': (
'''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'''
)
},
}
__UpperCamelCase : List[Any] = {
'''facebook/blenderbot_small-90M''': 5_1_2,
}
class SCREAMING_SNAKE_CASE ( a_ ):
"""simple docstring"""
lowercase__ = VOCAB_FILES_NAMES
lowercase__ = PRETRAINED_VOCAB_FILES_MAP
lowercase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase__ = BlenderbotSmallTokenizer
def __init__( self : Dict ,lowercase_ : Dict=None ,lowercase_ : Union[str, Any]=None ,lowercase_ : Any="<|endoftext|>" ,lowercase_ : Optional[Any]="<|endoftext|>" ,lowercase_ : Dict="<|endoftext|>" ,lowercase_ : Optional[int]=False ,lowercase_ : Union[str, Any]=True ,**lowercase_ : Union[str, Any] ,):
super().__init__(
ByteLevelBPETokenizer(
vocab=lowercase_ ,merges=lowercase_ ,add_prefix_space=lowercase_ ,trim_offsets=lowercase_ ,) ,bos_token=lowercase_ ,eos_token=lowercase_ ,unk_token=lowercase_ ,**lowercase_ ,)
lowerCAmelCase__ : Tuple = add_prefix_space
def __lowerCAmelCase ( self : Optional[Any] ,lowercase_ : Optional[int] ,lowercase_ : int=None ):
lowerCAmelCase__ : Dict = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def __lowerCAmelCase ( self : List[Any] ,lowercase_ : List[int] ,lowercase_ : Optional[List[int]] = None ):
lowerCAmelCase__ : Union[str, Any] = [self.sep_token_id]
lowerCAmelCase__ : Dict = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
| 74
| 0
|
"""simple docstring"""
import os
import numpy
import onnx
def lowerCamelCase ( _UpperCamelCase : Union[str, Any] , _UpperCamelCase : List[Any] ) -> List[str]:
'''simple docstring'''
__UpperCAmelCase : List[Any] = a.name
__UpperCAmelCase : str = b.name
__UpperCAmelCase : Optional[int] = """"""
__UpperCAmelCase : Tuple = """"""
__UpperCAmelCase : int = a == b
__UpperCAmelCase : List[Any] = name_a
__UpperCAmelCase : Dict = name_b
return res
def lowerCamelCase ( _UpperCamelCase : Optional[Any] , _UpperCamelCase : int , _UpperCamelCase : Union[str, Any] ) -> Any:
'''simple docstring'''
for i, input_name in enumerate(node_proto.input ):
if input_name == name:
node_proto.input.insert(_A , _A )
node_proto.input.pop(i + 1 )
if node_proto.op_type == "If":
_graph_replace_input_with(node_proto.attribute[0].g , _A , _A )
_graph_replace_input_with(node_proto.attribute[1].g , _A , _A )
if node_proto.op_type == "Loop":
_graph_replace_input_with(node_proto.attribute[0].g , _A , _A )
def lowerCamelCase ( _UpperCamelCase : Tuple , _UpperCamelCase : Dict , _UpperCamelCase : Any ) -> Tuple:
'''simple docstring'''
for n in graph_proto.node:
_node_replace_input_with(_A , _A , _A )
def lowerCamelCase ( _UpperCamelCase : int , _UpperCamelCase : Dict , _UpperCamelCase : Union[str, Any] ) -> Any:
'''simple docstring'''
__UpperCAmelCase : int = list(model.graph.initializer )
__UpperCAmelCase : Union[str, Any] = list(model_without_ext.graph.initializer )
for i, ref_i in ind_to_replace:
assert inits_with_data[i].name == inits[i].name
assert inits_with_data[ref_i].name == inits[ref_i].name
assert i > ref_i
__UpperCAmelCase : Tuple = inits[i].name
__UpperCAmelCase : Dict = inits[ref_i].name
model_without_ext.graph.initializer.remove(inits[i] )
# for n in model.graph.node:
_graph_replace_input_with(model_without_ext.graph , _A , _A )
def lowerCamelCase ( _UpperCamelCase : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = os.path.dirname(_A )
__UpperCAmelCase : str = os.path.basename(_A )
__UpperCAmelCase : str = onnx.load(os.path.join(_A , _A ) )
__UpperCAmelCase : List[Any] = list(model.graph.initializer )
__UpperCAmelCase : Any = set()
__UpperCAmelCase : Optional[Any] = {}
__UpperCAmelCase : List[str] = []
__UpperCAmelCase : List[str] = 0
for i in range(len(_A ) ):
if i in dup_set:
continue
for j in range(i + 1 , len(_A ) ):
if j in dup_set:
continue
if _is_equal_tensor_proto(inits[i] , inits[j] ):
dup_set.add(_A )
dup_set.add(_A )
__UpperCAmelCase : List[Any] = inits[j].data_type
__UpperCAmelCase : Optional[Any] = numpy.prod(inits[j].dims )
if dtype == 1:
mem_size *= 4
elif dtype == 6:
mem_size *= 4
elif dtype == 7 or dtype == 1_1:
mem_size *= 8
else:
print("""unexpected data type: """ , _A )
total_reduced_size += mem_size
__UpperCAmelCase : Union[str, Any] = inits[i].name
__UpperCAmelCase : List[str] = inits[j].name
if name_i in dup_map:
dup_map[name_i].append(_A )
else:
__UpperCAmelCase : Optional[Any] = [name_j]
ind_to_replace.append((j, i) )
print("""total reduced size: """ , total_reduced_size / 1_0_2_4 / 1_0_2_4 / 1_0_2_4 , """GB""" )
__UpperCAmelCase : int = sorted(_A )
_remove_dup_initializers_from_model(_A , _A , _A )
__UpperCAmelCase : Optional[Any] = """optimized_""" + model_file_name
__UpperCAmelCase : Tuple = os.path.join(_A , _A )
onnx.save(_A , _A )
return new_model
| 115
|
from ....configuration_utils import PretrainedConfig
from ....utils import logging
_SCREAMING_SNAKE_CASE : List[Any] = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE : List[Any] = {
'''CarlCochet/trajectory-transformer-halfcheetah-medium-v2''': (
'''https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json'''
),
# See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer
}
class UpperCAmelCase__ ( A__ ):
"""simple docstring"""
a = "trajectory_transformer"
a = ["past_key_values"]
a = {
"hidden_size": "n_embd",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self : Tuple , __lowerCamelCase : Any=100 , __lowerCamelCase : str=5 , __lowerCamelCase : int=1 , __lowerCamelCase : Tuple=1 , __lowerCamelCase : List[Any]=249 , __lowerCamelCase : List[str]=6 , __lowerCamelCase : Dict=17 , __lowerCamelCase : str=25 , __lowerCamelCase : Union[str, Any]=4 , __lowerCamelCase : List[Any]=4 , __lowerCamelCase : Dict=128 , __lowerCamelCase : Any=0.1 , __lowerCamelCase : str=0.1 , __lowerCamelCase : List[str]=0.1 , __lowerCamelCase : str=0.0006 , __lowerCamelCase : Any=512 , __lowerCamelCase : List[Any]=0.02 , __lowerCamelCase : Tuple=1e-12 , __lowerCamelCase : Optional[int]=1 , __lowerCamelCase : Any=True , __lowerCamelCase : List[str]=1 , __lowerCamelCase : Tuple=5_0256 , __lowerCamelCase : Dict=5_0256 , **__lowerCamelCase : str , ) -> Dict:
SCREAMING_SNAKE_CASE__ = vocab_size
SCREAMING_SNAKE_CASE__ = action_weight
SCREAMING_SNAKE_CASE__ = reward_weight
SCREAMING_SNAKE_CASE__ = value_weight
SCREAMING_SNAKE_CASE__ = max_position_embeddings
SCREAMING_SNAKE_CASE__ = block_size
SCREAMING_SNAKE_CASE__ = action_dim
SCREAMING_SNAKE_CASE__ = observation_dim
SCREAMING_SNAKE_CASE__ = transition_dim
SCREAMING_SNAKE_CASE__ = learning_rate
SCREAMING_SNAKE_CASE__ = n_layer
SCREAMING_SNAKE_CASE__ = n_head
SCREAMING_SNAKE_CASE__ = n_embd
SCREAMING_SNAKE_CASE__ = embd_pdrop
SCREAMING_SNAKE_CASE__ = attn_pdrop
SCREAMING_SNAKE_CASE__ = resid_pdrop
SCREAMING_SNAKE_CASE__ = initializer_range
SCREAMING_SNAKE_CASE__ = layer_norm_eps
SCREAMING_SNAKE_CASE__ = kaiming_initializer_range
SCREAMING_SNAKE_CASE__ = use_cache
super().__init__(pad_token_id=__lowerCamelCase , bos_token_id=__lowerCamelCase , eos_token_id=__lowerCamelCase , **__lowerCamelCase )
| 314
| 0
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
snake_case__ : List[Any] = logging.get_logger(__name__)
snake_case__ : str = {'vocab_file': 'sentencepiece.model'}
snake_case__ : str = {
'vocab_file': {
'google/rembert': 'https://huggingface.co/google/rembert/resolve/main/sentencepiece.model',
},
}
snake_case__ : Optional[int] = {
'google/rembert': 256,
}
class RemBertTokenizer(PreTrainedTokenizer):
    """SentencePiece-based tokenizer for RemBERT."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=True,
        keep_accents=True,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None  # SentencePieceProcessor instances are not picklable
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text, sample=False):
        pieces = self.sp_model.EncodeAsPieces(text)
        return pieces

    def _convert_token_to_id(self, token):
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        out_string = self.sp_model.decode_pieces(tokens)
        return out_string

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
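

# Illustrative usage sketch; "sentencepiece.model" below is a placeholder path
# to a local SentencePiece vocabulary, not a file shipped with this module:
#
#   tokenizer = RemBertTokenizer(vocab_file="sentencepiece.model")
#   ids = tokenizer.encode("Hello world")
#   print(tokenizer.convert_ids_to_tokens(ids))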
| 356
|
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
    from transformers import AutoModelForSeq2SeqLM, AutoTokenizer


@require_torch
@require_sentencepiece
@require_tokenizers
class MT5IntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        # Checks the small mT5 checkpoint against a reference per-sequence score.
        model = AutoModelForSeq2SeqLM.from_pretrained("google/mt5-small", return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="pt").input_ids
        labels = tokenizer("Hi I am", return_tensors="pt").input_ids

        loss = model(input_ids.to(torch_device), labels=labels.to(torch_device)).loss
        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
| 250
| 0
|
"""simple docstring"""
import importlib
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Union
import torch
from ..utils import BaseOutput
SCHEDULER_CONFIG_NAME = "scheduler_config.json"


class KarrasDiffusionSchedulers(Enum):
    DDIMScheduler = 1
    DDPMScheduler = 2
    PNDMScheduler = 3
    LMSDiscreteScheduler = 4
    EulerDiscreteScheduler = 5
    HeunDiscreteScheduler = 6
    EulerAncestralDiscreteScheduler = 7
    DPMSolverMultistepScheduler = 8
    DPMSolverSinglestepScheduler = 9
    KDPM2DiscreteScheduler = 10
    KDPM2AncestralDiscreteScheduler = 11
    DEISMultistepScheduler = 12
    UniPCMultistepScheduler = 13
    DPMSolverSDEScheduler = 14


@dataclass
class SchedulerOutput(BaseOutput):
    prev_sample: torch.FloatTensor


class SchedulerMixin:
    config_name = SCHEDULER_CONFIG_NAME
    _compatibles = []
    has_compatibles = True

    @classmethod
    def from_pretrained(
        cls,
        pretrained_model_name_or_path: Dict[str, Any] = None,
        subfolder: Optional[str] = None,
        return_unused_kwargs=False,
        **kwargs,
    ):
        config, kwargs, commit_hash = cls.load_config(
            pretrained_model_name_or_path=pretrained_model_name_or_path,
            subfolder=subfolder,
            return_unused_kwargs=True,
            return_commit_hash=True,
            **kwargs,
        )
        return cls.from_config(config, return_unused_kwargs=return_unused_kwargs, **kwargs)

    def save_pretrained(self, save_directory: Union[str, os.PathLike], push_to_hub: bool = False, **kwargs):
        self.save_config(save_directory=save_directory, push_to_hub=push_to_hub, **kwargs)

    @property
    def compatibles(self):
        return self._get_compatibles()

    @classmethod
    def _get_compatibles(cls):
        compatible_classes_str = list(set([cls.__name__] + cls._compatibles))
        diffusers_library = importlib.import_module(__name__.split(".")[0])
        compatible_classes = [
            getattr(diffusers_library, c) for c in compatible_classes_str if hasattr(diffusers_library, c)
        ]
        return compatible_classes
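

# Illustrative usage sketch; any concrete scheduler subclassing SchedulerMixin
# inherits the from_pretrained/save_pretrained round-trip and `compatibles`:
#
#   from diffusers import DDIMScheduler
#   scheduler = DDIMScheduler.from_pretrained("runwayml/stable-diffusion-v1-5", subfolder="scheduler")
#   print([cls.__name__ for cls in scheduler.compatibles])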
| 183
|
"""simple docstring"""
from typing import Any
class Node:
    def __init__(self, data: Any) -> None:
        self.data = data
        self.next = None


class LinkedList:
    def __init__(self) -> None:
        self.head = None

    def print_list(self) -> None:
        temp = self.head
        while temp is not None:
            print(temp.data, end=" ")
            temp = temp.next
        print()

    def push(self, new_data: Any) -> None:
        # Insert a new node at the head of the list.
        new_node = Node(new_data)
        new_node.next = self.head
        self.head = new_node

    def swap_nodes(self, node_data_1: Any, node_data_2: Any) -> None:
        # Swap the payloads of the first nodes holding the two given values.
        if node_data_1 == node_data_2:
            return
        node_1 = self.head
        while node_1 is not None and node_1.data != node_data_1:
            node_1 = node_1.next
        node_2 = self.head
        while node_2 is not None and node_2.data != node_data_2:
            node_2 = node_2.next
        if node_1 is None or node_2 is None:
            return
        node_1.data, node_2.data = node_2.data, node_1.data


if __name__ == "__main__":
    ll = LinkedList()
    for i in range(5, 0, -1):
        ll.push(i)
    ll.print_list()
    ll.swap_nodes(1, 4)
    print("After swapping")
    ll.print_list()
| 183
| 1
|
"""simple docstring"""
import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen, xsplitext
from ..table import array_cast
from ..utils.py_utils import no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
from .features import FeatureType
lowercase__ = False, False, False
@dataclass
class Audio:
    """Audio feature: encodes examples into (and decodes them from) {"bytes", "path"} structs."""

    sampling_rate: Optional[int] = None
    mono: bool = True
    decode: bool = True
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()})
    _type: str = field(default="Audio", init=False, repr=False)

    def __call__(self):
        return self.pa_type

    def encode_example(self, value: Union[str, bytes, dict]) -> dict:
        try:
            import soundfile as sf  # soundfile is a dependency of librosa, needed to decode audio files.
        except ImportError as err:
            raise ImportError("To support encoding audio data, please install 'soundfile'.") from err
        if isinstance(value, str):
            return {"bytes": None, "path": value}
        elif isinstance(value, bytes):
            return {"bytes": value, "path": None}
elif "array" in value:
# convert the audio array to wav bytes
            buffer = BytesIO()
            sf.write(buffer, value["array"], value["sampling_rate"], format="wav")
            return {"bytes": buffer.getvalue(), "path": None}
elif value.get("path" ) is not None and os.path.isfile(value["path"] ):
# we set "bytes": None to not duplicate the data if they're already available locally
if value["path"].endswith("pcm" ):
# "PCM" only has raw audio bytes
if value.get("sampling_rate" ) is None:
# At least, If you want to convert "PCM-byte" to "WAV-byte", you have to know sampling rate
raise KeyError("To use PCM files, please specify a \'sampling_rate\' in Audio object" )
if value.get("bytes" ):
# If we already had PCM-byte, we don`t have to make "read file, make bytes" (just use it!)
lowerCAmelCase_ : Optional[int] = np.frombuffer(value["bytes"] , dtype=np.intaa ).astype(np.floataa ) / 3_27_67
else:
lowerCAmelCase_ : Union[str, Any] = np.memmap(value["path"] , dtype="h" , mode="r" ).astype(np.floataa ) / 3_27_67
lowerCAmelCase_ : Optional[Any] = BytesIO(bytes() )
sf.write(_lowercase , _lowercase , value["sampling_rate"] , format="wav" )
return {"bytes": buffer.getvalue(), "path": None}
else:
return {"bytes": None, "path": value.get("path" )}
elif value.get("bytes" ) is not None or value.get("path" ) is not None:
# store the audio bytes, and path is used to infer the audio format using the file extension
return {"bytes": value.get("bytes" ), "path": value.get("path" )}
else:
raise ValueError(
f'''An audio sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.''' )
    def decode_example(self, value: dict, token_per_repo_id: Optional[Dict[str, Union[str, bool, None]]] = None) -> dict:
        if not self.decode:
            raise RuntimeError("Decoding is disabled for this feature. Please use Audio(decode=True) instead.")
        path, file = (value["path"], BytesIO(value["bytes"])) if value["bytes"] is not None else (value["path"], None)
if path is None and file is None:
raise ValueError(f'''An audio sample should have one of \'path\' or \'bytes\' but both are None in {value}.''' )
try:
import librosa
import soundfile as sf
except ImportError as err:
raise ImportError("To support decoding audio files, please install \'librosa\' and \'soundfile\'." ) from err
        audio_format = xsplitext(path)[1][1:].lower() if path is not None else None
if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
raise RuntimeError(
"Decoding \'opus\' files requires system library \'libsndfile\'>=1.0.31, "
"You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. " )
elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
raise RuntimeError(
"Decoding \'mp3\' files requires system library \'libsndfile\'>=1.1.0, "
"You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. " )
if file is None:
            token_per_repo_id = token_per_repo_id or {}
            source_url = path.split("::")[-1]
            try:
                repo_id = string_to_dict(source_url, config.HUB_DATASETS_URL)["repo_id"]
                use_auth_token = token_per_repo_id[repo_id]
            except (ValueError, KeyError):
                use_auth_token = None
            with xopen(path, "rb", use_auth_token=use_auth_token) as f:
                array, sampling_rate = sf.read(f)
else:
            array, sampling_rate = sf.read(file)
        array = array.T
        if self.mono:
            array = librosa.to_mono(array)
        if self.sampling_rate and self.sampling_rate != sampling_rate:
            array = librosa.resample(array, orig_sr=sampling_rate, target_sr=self.sampling_rate)
            sampling_rate = self.sampling_rate
return {"path": path, "array": array, "sampling_rate": sampling_rate}
    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
from .features import Value
if self.decode:
raise ValueError("Cannot flatten a decoded Audio feature." )
return {
"bytes": Value("binary" ),
"path": Value("string" ),
}
    def cast_storage(self, storage: Union[pa.StringArray, pa.StructArray]) -> pa.StructArray:
        if pa.types.is_string(storage.type):
            bytes_array = pa.array([None] * len(storage), type=pa.binary())
            storage = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_binary(storage.type):
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_struct(storage.type) and storage.type.get_all_field_indices("array"):
            storage = pa.array([Audio().encode_example(x) if x is not None else None for x in storage.to_pylist()])
        elif pa.types.is_struct(storage.type):
            if storage.type.get_field_index("bytes") >= 0:
                bytes_array = storage.field("bytes")
            else:
                bytes_array = pa.array([None] * len(storage), type=pa.binary())
            if storage.type.get_field_index("path") >= 0:
                path_array = storage.field("path")
            else:
                path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null())
        return array_cast(storage, self.pa_type)
    def embed_storage(self, storage: pa.StructArray) -> pa.StructArray:
        @no_op_if_value_is_null
        def path_to_bytes(path):
            with xopen(path, "rb") as f:
                bytes_ = f.read()
            return bytes_

        bytes_array = pa.array(
            [
                (path_to_bytes(x["path"]) if x["bytes"] is None else x["bytes"]) if x is not None else None
                for x in storage.to_pylist()
            ],
            type=pa.binary(),
        )
        path_array = pa.array(
            [os.path.basename(path) if path is not None else None for path in storage.field("path").to_pylist()],
            type=pa.string(),
        )
        storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null())
        return array_cast(storage, self.pa_type)
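

# Illustrative usage sketch; "example.wav" is a placeholder path, and decoding
# additionally requires librosa and soundfile to be installed:
#
#   feature = Audio(sampling_rate=16_000)
#   encoded = feature.encode_example("example.wav")  # {"bytes": None, "path": "example.wav"}
#   decoded = feature.decode_example(encoded)
#   print(decoded["sampling_rate"], decoded["array"].shape)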
| 370
|
"""simple docstring"""
from packaging import version
from .import_utils import is_accelerate_available
if is_accelerate_available():
import accelerate
def apply_forward_hook(method):
    """Decorator that runs the accelerate pre-forward hook (if any) before calling `method`."""
    if not is_accelerate_available():
        return method
    accelerate_version = version.parse(accelerate.__version__).base_version
    if version.parse(accelerate_version) < version.parse("0.17.0"):
        return method

    def wrapper(self, *args, **kwargs):
        if hasattr(self, "_hf_hook") and hasattr(self._hf_hook, "pre_forward"):
            self._hf_hook.pre_forward(self)
        return method(self, *args, **kwargs)

    return wrapper
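

# Illustrative usage sketch; the decorator is a no-op unless a recent
# `accelerate` is installed and the module carries an `_hf_hook`:
#
#   class TinyAutoencoder(torch.nn.Module):
#       @apply_forward_hook
#       def encode(self, x):
#           return x * 0.5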
| 161
| 0
|
import math
import flax.linen as nn
import jax.numpy as jnp
def get_sinusoidal_embeddings(
    timesteps: jnp.ndarray,
    embedding_dim: int,
    freq_shift: float = 1,
    min_timescale: float = 1,
    max_timescale: float = 1.0e4,
    flip_sin_to_cos: bool = False,
    scale: float = 1.0,
) -> jnp.ndarray:
    """Returns sinusoidal positional embeddings for a batch of timesteps."""
    assert timesteps.ndim == 1, "Timesteps should be a 1d-array"
    assert embedding_dim % 2 == 0, f"Embedding dimension {embedding_dim} should be even"
    num_timescales = float(embedding_dim // 2)
    log_timescale_increment = math.log(max_timescale / min_timescale) / (num_timescales - freq_shift)
    inv_timescales = min_timescale * jnp.exp(jnp.arange(num_timescales, dtype=jnp.float32) * -log_timescale_increment)
    emb = jnp.expand_dims(timesteps, 1) * jnp.expand_dims(inv_timescales, 0)

    # scale embeddings
    scaled_time = scale * emb

    if flip_sin_to_cos:
        signal = jnp.concatenate([jnp.cos(scaled_time), jnp.sin(scaled_time)], axis=1)
    else:
        signal = jnp.concatenate([jnp.sin(scaled_time), jnp.cos(scaled_time)], axis=1)
    signal = jnp.reshape(signal, [jnp.shape(timesteps)[0], embedding_dim])
    return signal


class FlaxTimestepEmbedding(nn.Module):
    time_embed_dim: int = 32
    dtype: jnp.dtype = jnp.float32

    @nn.compact
    def __call__(self, temb):
        temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name="linear_1")(temb)
        temb = nn.silu(temb)
        temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name="linear_2")(temb)
        return temb


class FlaxTimesteps(nn.Module):
    dim: int = 32
    flip_sin_to_cos: bool = False
    freq_shift: float = 1

    @nn.compact
    def __call__(self, timesteps):
        return get_sinusoidal_embeddings(
            timesteps, embedding_dim=self.dim, flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.freq_shift
        )
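

# Illustrative usage sketch for the plain embedding function above:
#
#   timesteps = jnp.array([0, 10, 20])
#   emb = get_sinusoidal_embeddings(timesteps, embedding_dim=32)
#   print(emb.shape)  # (3, 32)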
| 154
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeq2SeqLM, AutoTokenizer
from .base import PipelineTool
LANGUAGE_CODES = {
'Acehnese Arabic': 'ace_Arab',
'Acehnese Latin': 'ace_Latn',
'Mesopotamian Arabic': 'acm_Arab',
'Ta\'izzi-Adeni Arabic': 'acq_Arab',
'Tunisian Arabic': 'aeb_Arab',
'Afrikaans': 'afr_Latn',
'South Levantine Arabic': 'ajp_Arab',
'Akan': 'aka_Latn',
'Amharic': 'amh_Ethi',
'North Levantine Arabic': 'apc_Arab',
'Modern Standard Arabic': 'arb_Arab',
'Modern Standard Arabic Romanized': 'arb_Latn',
'Najdi Arabic': 'ars_Arab',
'Moroccan Arabic': 'ary_Arab',
'Egyptian Arabic': 'arz_Arab',
'Assamese': 'asm_Beng',
'Asturian': 'ast_Latn',
'Awadhi': 'awa_Deva',
'Central Aymara': 'ayr_Latn',
'South Azerbaijani': 'azb_Arab',
'North Azerbaijani': 'azj_Latn',
'Bashkir': 'bak_Cyrl',
'Bambara': 'bam_Latn',
'Balinese': 'ban_Latn',
'Belarusian': 'bel_Cyrl',
'Bemba': 'bem_Latn',
'Bengali': 'ben_Beng',
'Bhojpuri': 'bho_Deva',
'Banjar Arabic': 'bjn_Arab',
'Banjar Latin': 'bjn_Latn',
'Standard Tibetan': 'bod_Tibt',
'Bosnian': 'bos_Latn',
'Buginese': 'bug_Latn',
'Bulgarian': 'bul_Cyrl',
'Catalan': 'cat_Latn',
'Cebuano': 'ceb_Latn',
'Czech': 'ces_Latn',
'Chokwe': 'cjk_Latn',
'Central Kurdish': 'ckb_Arab',
'Crimean Tatar': 'crh_Latn',
'Welsh': 'cym_Latn',
'Danish': 'dan_Latn',
'German': 'deu_Latn',
'Southwestern Dinka': 'dik_Latn',
'Dyula': 'dyu_Latn',
'Dzongkha': 'dzo_Tibt',
'Greek': 'ell_Grek',
'English': 'eng_Latn',
'Esperanto': 'epo_Latn',
'Estonian': 'est_Latn',
'Basque': 'eus_Latn',
'Ewe': 'ewe_Latn',
'Faroese': 'fao_Latn',
'Fijian': 'fij_Latn',
'Finnish': 'fin_Latn',
'Fon': 'fon_Latn',
'French': 'fra_Latn',
'Friulian': 'fur_Latn',
'Nigerian Fulfulde': 'fuv_Latn',
'Scottish Gaelic': 'gla_Latn',
'Irish': 'gle_Latn',
'Galician': 'glg_Latn',
'Guarani': 'grn_Latn',
'Gujarati': 'guj_Gujr',
'Haitian Creole': 'hat_Latn',
'Hausa': 'hau_Latn',
'Hebrew': 'heb_Hebr',
'Hindi': 'hin_Deva',
'Chhattisgarhi': 'hne_Deva',
'Croatian': 'hrv_Latn',
'Hungarian': 'hun_Latn',
'Armenian': 'hye_Armn',
'Igbo': 'ibo_Latn',
'Ilocano': 'ilo_Latn',
'Indonesian': 'ind_Latn',
'Icelandic': 'isl_Latn',
'Italian': 'ita_Latn',
'Javanese': 'jav_Latn',
'Japanese': 'jpn_Jpan',
'Kabyle': 'kab_Latn',
'Jingpho': 'kac_Latn',
'Kamba': 'kam_Latn',
'Kannada': 'kan_Knda',
'Kashmiri Arabic': 'kas_Arab',
'Kashmiri Devanagari': 'kas_Deva',
'Georgian': 'kat_Geor',
'Central Kanuri Arabic': 'knc_Arab',
'Central Kanuri Latin': 'knc_Latn',
'Kazakh': 'kaz_Cyrl',
'Kabiyè': 'kbp_Latn',
'Kabuverdianu': 'kea_Latn',
'Khmer': 'khm_Khmr',
'Kikuyu': 'kik_Latn',
'Kinyarwanda': 'kin_Latn',
'Kyrgyz': 'kir_Cyrl',
'Kimbundu': 'kmb_Latn',
'Northern Kurdish': 'kmr_Latn',
'Kikongo': 'kon_Latn',
'Korean': 'kor_Hang',
'Lao': 'lao_Laoo',
'Ligurian': 'lij_Latn',
'Limburgish': 'lim_Latn',
'Lingala': 'lin_Latn',
'Lithuanian': 'lit_Latn',
'Lombard': 'lmo_Latn',
'Latgalian': 'ltg_Latn',
'Luxembourgish': 'ltz_Latn',
'Luba-Kasai': 'lua_Latn',
'Ganda': 'lug_Latn',
'Luo': 'luo_Latn',
'Mizo': 'lus_Latn',
'Standard Latvian': 'lvs_Latn',
'Magahi': 'mag_Deva',
'Maithili': 'mai_Deva',
'Malayalam': 'mal_Mlym',
'Marathi': 'mar_Deva',
'Minangkabau Arabic ': 'min_Arab',
'Minangkabau Latin': 'min_Latn',
'Macedonian': 'mkd_Cyrl',
'Plateau Malagasy': 'plt_Latn',
'Maltese': 'mlt_Latn',
'Meitei Bengali': 'mni_Beng',
'Halh Mongolian': 'khk_Cyrl',
'Mossi': 'mos_Latn',
'Maori': 'mri_Latn',
'Burmese': 'mya_Mymr',
'Dutch': 'nld_Latn',
'Norwegian Nynorsk': 'nno_Latn',
'Norwegian Bokmål': 'nob_Latn',
'Nepali': 'npi_Deva',
'Northern Sotho': 'nso_Latn',
'Nuer': 'nus_Latn',
'Nyanja': 'nya_Latn',
'Occitan': 'oci_Latn',
'West Central Oromo': 'gaz_Latn',
'Odia': 'ory_Orya',
'Pangasinan': 'pag_Latn',
'Eastern Panjabi': 'pan_Guru',
'Papiamento': 'pap_Latn',
'Western Persian': 'pes_Arab',
'Polish': 'pol_Latn',
'Portuguese': 'por_Latn',
'Dari': 'prs_Arab',
'Southern Pashto': 'pbt_Arab',
'Ayacucho Quechua': 'quy_Latn',
'Romanian': 'ron_Latn',
'Rundi': 'run_Latn',
'Russian': 'rus_Cyrl',
'Sango': 'sag_Latn',
'Sanskrit': 'san_Deva',
'Santali': 'sat_Olck',
'Sicilian': 'scn_Latn',
'Shan': 'shn_Mymr',
'Sinhala': 'sin_Sinh',
'Slovak': 'slk_Latn',
'Slovenian': 'slv_Latn',
'Samoan': 'smo_Latn',
'Shona': 'sna_Latn',
'Sindhi': 'snd_Arab',
'Somali': 'som_Latn',
'Southern Sotho': 'sot_Latn',
'Spanish': 'spa_Latn',
'Tosk Albanian': 'als_Latn',
'Sardinian': 'srd_Latn',
'Serbian': 'srp_Cyrl',
'Swati': 'ssw_Latn',
'Sundanese': 'sun_Latn',
'Swedish': 'swe_Latn',
'Swahili': 'swh_Latn',
'Silesian': 'szl_Latn',
'Tamil': 'tam_Taml',
'Tatar': 'tat_Cyrl',
'Telugu': 'tel_Telu',
'Tajik': 'tgk_Cyrl',
'Tagalog': 'tgl_Latn',
'Thai': 'tha_Thai',
'Tigrinya': 'tir_Ethi',
'Tamasheq Latin': 'taq_Latn',
'Tamasheq Tifinagh': 'taq_Tfng',
'Tok Pisin': 'tpi_Latn',
'Tswana': 'tsn_Latn',
'Tsonga': 'tso_Latn',
'Turkmen': 'tuk_Latn',
'Tumbuka': 'tum_Latn',
'Turkish': 'tur_Latn',
'Twi': 'twi_Latn',
'Central Atlas Tamazight': 'tzm_Tfng',
'Uyghur': 'uig_Arab',
'Ukrainian': 'ukr_Cyrl',
'Umbundu': 'umb_Latn',
'Urdu': 'urd_Arab',
'Northern Uzbek': 'uzn_Latn',
'Venetian': 'vec_Latn',
'Vietnamese': 'vie_Latn',
'Waray': 'war_Latn',
'Wolof': 'wol_Latn',
'Xhosa': 'xho_Latn',
'Eastern Yiddish': 'ydd_Hebr',
'Yoruba': 'yor_Latn',
'Yue Chinese': 'yue_Hant',
'Chinese Simplified': 'zho_Hans',
'Chinese Traditional': 'zho_Hant',
'Standard Malay': 'zsm_Latn',
'Zulu': 'zul_Latn',
}
class TranslationTool(PipelineTool):
    default_checkpoint = "facebook/nllb-200-distilled-600M"
    description = (
        "This is a tool that translates text from a language to another. It takes three inputs: `text`, which should "
        "be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, "
        "which should be the language for the desired output language. Both `src_lang` and `tgt_lang` are written in "
        "plain English, such as 'Romanian', or 'Albanian'. It returns the text translated in `tgt_lang`."
    )
    name = "translator"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeq2SeqLM
    lang_to_code = LANGUAGE_CODES
    inputs = ["text", "text", "text"]
    outputs = ["text"]

    def encode(self, text, src_lang, tgt_lang):
        if src_lang not in self.lang_to_code:
            raise ValueError(f"{src_lang} is not a supported language.")
        if tgt_lang not in self.lang_to_code:
            raise ValueError(f"{tgt_lang} is not a supported language.")
        src_lang = self.lang_to_code[src_lang]
        tgt_lang = self.lang_to_code[tgt_lang]
        return self.pre_processor._build_translation_inputs(
            text, return_tensors="pt", src_lang=src_lang, tgt_lang=tgt_lang
        )

    def forward(self, inputs):
        return self.model.generate(**inputs)

    def decode(self, outputs):
        return self.post_processor.decode(outputs[0].tolist(), skip_special_tokens=True)
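

# Illustrative usage sketch; language names are resolved through LANGUAGE_CODES,
# so both language arguments are plain-English names rather than NLLB codes:
#
#   tool = TranslationTool()
#   print(tool("Bonjour, comment allez-vous ?", src_lang="French", tgt_lang="English"))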
| 32
| 0
|
from __future__ import annotations

from numpy import array, cos, cross, float64, radians, sin
from numpy.typing import NDArray


def polar_force(magnitude: float, angle: float, radian_mode: bool = False) -> list[float]:
    """Resolve a force given in polar form into rectangular (x, y) components."""
    if radian_mode:
        return [magnitude * cos(angle), magnitude * sin(angle)]
    return [magnitude * cos(radians(angle)), magnitude * sin(radians(angle))]


def in_static_equilibrium(
    forces: NDArray[float64], location: NDArray[float64], eps: float = 10**-1
) -> bool:
    """Check whether the net moment of a 2D force system is (approximately) zero."""
    # moment of each force about the origin: r x F
    moments: NDArray[float64] = cross(location, forces)
    sum_moments: float = sum(moments)
    return abs(sum_moments) < eps


if __name__ == "__main__":
    # Test to check if it works
    forces = array(
        [
            polar_force(718.4, 180 - 30),
            polar_force(879.54, 45),
            polar_force(100, -90),
        ]
    )
    location: NDArray[float64] = array([[0, 0], [0, 0], [0, 0]])
    assert in_static_equilibrium(forces, location)

    # Problem 1 in image_data/2D_problems.jpg
    forces = array(
        [
            polar_force(30 * 9.81, 15),
            polar_force(215, 180 - 45),
            polar_force(264, 90 - 30),
        ]
    )
    location = array([[0, 0], [0, 0], [0, 0]])
    assert in_static_equilibrium(forces, location)

    # Problem in image_data/2D_problems_1.jpg
    forces = array([[0, -2000], [0, -1200], [0, 15600], [0, -12400]])
    location = array([[0, 0], [6, 0], [10, 0], [12, 0]])
    assert in_static_equilibrium(forces, location)

    import doctest

    doctest.testmod()
| 367
|
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor
@require_vision
class ChineseCLIPProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "的",
            "价",
            "格",
            "是",
            "15",
            "便",
            "alex",
            "##andra",
            ",",
            "。",
            "-",
            "t",
            "shirt",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        image_processor_map = {
            "do_resize": True,
            "size": {"height": 224, "width": 224},
            "do_center_crop": True,
            "crop_size": {"height": 18, "width": 18},
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
            "do_convert_rgb": True,
        }
        self.image_processor_file = os.path.join(self.tmpdirname, FEATURE_EXTRACTOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return BertTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Creates a list of PIL images to pass to the processor."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = ChineseCLIPProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = ChineseCLIPProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = ChineseCLIPProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = ChineseCLIPProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, BertTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, BertTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, ChineseCLIPImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, ChineseCLIPImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = ChineseCLIPProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(cls_token="(CLS)", sep_token="(SEP)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False)

        processor = ChineseCLIPProcessor.from_pretrained(
            self.tmpdirname, cls_token="(CLS)", sep_token="(SEP)", do_normalize=False
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, BertTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ChineseCLIPImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "Alexandra,T-shirt的价格是15便士。"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "Alexandra,T-shirt的价格是15便士。"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "token_type_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "Alexandra,T-shirt的价格是15便士。"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
| 206
| 0
|
import os
SYMBOLS = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}


def parse_roman_numerals(numerals: str) -> int:
    """Convert a roman numeral string (subtractive notation allowed) to an integer."""
    total_value = 0

    index = 0
    while index < len(numerals) - 1:
        current_value = SYMBOLS[numerals[index]]
        next_value = SYMBOLS[numerals[index + 1]]
        if current_value < next_value:
            total_value -= current_value
        else:
            total_value += current_value
        index += 1
    total_value += SYMBOLS[numerals[index]]

    return total_value


def generate_roman_numerals(num: int) -> str:
    """Generate the shortest roman numeral representation of an integer."""
    numerals = ""

    m_count = num // 1000
    numerals += m_count * "M"
    num %= 1000

    c_count = num // 100
    if c_count == 9:
        numerals += "CM"
        c_count -= 9
    elif c_count == 4:
        numerals += "CD"
        c_count -= 4
    if c_count >= 5:
        numerals += "D"
        c_count -= 5
    numerals += c_count * "C"
    num %= 100

    x_count = num // 10
    if x_count == 9:
        numerals += "XC"
        x_count -= 9
    elif x_count == 4:
        numerals += "XL"
        x_count -= 4
    if x_count >= 5:
        numerals += "L"
        x_count -= 5
    numerals += x_count * "X"
    num %= 10

    if num == 9:
        numerals += "IX"
        num -= 9
    elif num == 4:
        numerals += "IV"
        num -= 4
    if num >= 5:
        numerals += "V"
        num -= 5
    numerals += num * "I"

    return numerals


def solution(roman_numerals_filename: str = "/p089_roman.txt") -> int:
    """Project Euler 89: characters saved by rewriting each numeral in minimal form."""
    savings = 0
    with open(os.path.dirname(__file__) + roman_numerals_filename) as filea:
        lines = filea.readlines()
    for line in lines:
        original = line.strip()
        num = parse_roman_numerals(original)
        shortened = generate_roman_numerals(num)
        savings += len(original) - len(shortened)
    return savings
if __name__ == "__main__":
print(F'''{solution() = }''')
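
# Illustrative round-trip checks for the two helpers above:
#
#   assert parse_roman_numerals("XIIII") == 14      # non-minimal input, 5 chars
#   assert generate_roman_numerals(14) == "XIV"     # minimal output, 3 chars
#   assert parse_roman_numerals(generate_roman_numerals(1999)) == 1999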
| 325
|
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"]
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )
    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
a_ = mocked_dataloaders # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    # New Code #
    # We now can define an inner training loop function. It should take a batch size as the only parameter,
    # and build the dataloaders in there.
    # It also gets our decorator
    @find_executable_batch_size(starting_batch_size=batch_size)
    def inner_training_loop(batch_size):
        # And now just move everything below under this function
        # We need to bring in the Accelerator object from earlier
        nonlocal accelerator
        # And reset all of its attributes that could hold onto any memory:
        accelerator.free_memory()

        # Then we can declare the model, optimizer, and everything else:
        set_seed(seed)

        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device)

        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr)
        train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=100,
            num_training_steps=(len(train_dataloader) * num_epochs),
        )

        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
        )

        # Now we train the model
        for epoch in range(num_epochs):
            model.train()
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                outputs = model(**batch)
                loss = outputs.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

            model.eval()
            for step, batch in enumerate(eval_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                with torch.no_grad():
                    outputs = model(**batch)
                predictions = outputs.logits.argmax(dim=-1)
                predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
                metric.add_batch(predictions=predictions, references=references)

            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f"epoch {epoch}:", eval_metric)

    # New Code #
    # And call it at the end with no arguments
    # Note: You could also refactor this outside of your training loop function
    inner_training_loop()


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
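
# Minimal standalone sketch of the decorator used above; the wrapped function is
# retried with a halved batch size each time it raises an out-of-memory error:
#
#   from accelerate.utils import find_executable_batch_size
#
#   @find_executable_batch_size(starting_batch_size=128)
#   def train(batch_size):
#       ...  # an OOM raised here triggers a retry at 64, then 32, ...
#
#   train()  # called with no arguments; the decorator supplies batch_size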
| 330
| 0
|
import gc
import unittest
import numpy as np
import torch
from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
@slow
@require_torch_gpu
class StableDiffusionKDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_1(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler("sample_euler")

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=9.0, num_inference_steps=20, output_type="np")

        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0447, 0.0492, 0.0468, 0.0408, 0.0383, 0.0408, 0.0354, 0.0380, 0.0339])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_2(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler("sample_euler")

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=9.0, num_inference_steps=20, output_type="np")

        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1237, 0.1320, 0.1438, 0.1359, 0.1390, 0.1132, 0.1277, 0.1175, 0.1112])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-1

    def test_stable_diffusion_karras_sigmas(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler("sample_dpmpp_2m")

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=15,
            output_type="np",
            use_karras_sigmas=True,
        )

        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.11381689, 0.12112921, 0.1389457, 0.12549606, 0.1244964, 0.10831517, 0.11562866, 0.10867816, 0.10499048]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 367
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"configuration_clip": [
"CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"CLIPConfig",
"CLIPOnnxConfig",
"CLIPTextConfig",
"CLIPVisionConfig",
],
"processing_clip": ["CLIPProcessor"],
"tokenization_clip": ["CLIPTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = ["CLIPTokenizerFast"]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = ["CLIPFeatureExtractor"]
UpperCAmelCase__ = ["CLIPImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clip"] = [
"CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"CLIPModel",
"CLIPPreTrainedModel",
"CLIPTextModel",
"CLIPTextModelWithProjection",
"CLIPVisionModel",
"CLIPVisionModelWithProjection",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_clip"] = [
"TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFCLIPModel",
"TFCLIPPreTrainedModel",
"TFCLIPTextModel",
"TFCLIPVisionModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_clip"] = [
"FlaxCLIPModel",
"FlaxCLIPPreTrainedModel",
"FlaxCLIPTextModel",
"FlaxCLIPTextPreTrainedModel",
"FlaxCLIPVisionModel",
"FlaxCLIPVisionPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 290
| 0
|
import torch
from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde
from .test_schedulers import SchedulerCommonTest
@require_torchsde
class lowerCamelCase__ ( lowerCamelCase__):
'''simple docstring'''
snake_case_ =(DPMSolverSDEScheduler,)
snake_case_ =10
def lowerCAmelCase__ (self ,**__lowerCamelCase ) -> List[str]:
"""simple docstring"""
lowerCAmelCase__ : List[str] = {
'''num_train_timesteps''': 11_00,
'''beta_start''': 0.0001,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
'''noise_sampler_seed''': 0,
}
config.update(**__lowerCamelCase )
return config
def lowerCAmelCase__ (self ) -> int:
"""simple docstring"""
for timesteps in [10, 50, 1_00, 10_00]:
self.check_over_configs(num_train_timesteps=__lowerCamelCase )
def lowerCAmelCase__ (self ) -> Optional[Any]:
"""simple docstring"""
for beta_start, beta_end in zip([0.0_0001, 0.0001, 0.001] ,[0.0002, 0.002, 0.02] ):
self.check_over_configs(beta_start=__lowerCamelCase ,beta_end=__lowerCamelCase )
def lowerCAmelCase__ (self ) -> Union[str, Any]:
"""simple docstring"""
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=__lowerCamelCase )
def lowerCAmelCase__ (self ) -> Optional[Any]:
"""simple docstring"""
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=__lowerCamelCase )
def lowerCAmelCase__ (self ) -> int:
"""simple docstring"""
lowerCAmelCase__ : List[str] = self.scheduler_classes[0]
lowerCAmelCase__ : str = self.get_scheduler_config()
lowerCAmelCase__ : Optional[Any] = scheduler_class(**__lowerCamelCase )
scheduler.set_timesteps(self.num_inference_steps )
lowerCAmelCase__ : Union[str, Any] = self.dummy_model()
lowerCAmelCase__ : Tuple = self.dummy_sample_deter * scheduler.init_noise_sigma
lowerCAmelCase__ : Union[str, Any] = sample.to(__lowerCamelCase )
for i, t in enumerate(scheduler.timesteps ):
lowerCAmelCase__ : Dict = scheduler.scale_model_input(__lowerCamelCase ,__lowerCamelCase )
lowerCAmelCase__ : Any = model(__lowerCamelCase ,__lowerCamelCase )
lowerCAmelCase__ : List[Any] = scheduler.step(__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase )
lowerCAmelCase__ : Optional[int] = output.prev_sample
lowerCAmelCase__ : List[Any] = torch.sum(torch.abs(__lowerCamelCase ) )
lowerCAmelCase__ : Dict = torch.mean(torch.abs(__lowerCamelCase ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 167.47_8210_4492_1875 ) < 1e-2
assert abs(result_mean.item() - 0.2178_7059_6456_5277 ) < 1e-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 171.59_3521_1181_6406 ) < 1e-2
assert abs(result_mean.item() - 0.2_2342_9068_9229_9652 ) < 1e-3
else:
assert abs(result_sum.item() - 162.52_3834_2285_1562 ) < 1e-2
assert abs(result_mean.item() - 0.211_6195_7085_1326 ) < 1e-3
def lowerCAmelCase__ (self ) -> Union[str, Any]:
"""simple docstring"""
lowerCAmelCase__ : Dict = self.scheduler_classes[0]
lowerCAmelCase__ : Any = self.get_scheduler_config(prediction_type='''v_prediction''' )
lowerCAmelCase__ : List[Any] = scheduler_class(**__lowerCamelCase )
scheduler.set_timesteps(self.num_inference_steps )
lowerCAmelCase__ : Optional[int] = self.dummy_model()
lowerCAmelCase__ : Optional[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma
lowerCAmelCase__ : Tuple = sample.to(__lowerCamelCase )
for i, t in enumerate(scheduler.timesteps ):
lowerCAmelCase__ : Optional[Any] = scheduler.scale_model_input(__lowerCamelCase ,__lowerCamelCase )
lowerCAmelCase__ : Optional[Any] = model(__lowerCamelCase ,__lowerCamelCase )
lowerCAmelCase__ : Optional[int] = scheduler.step(__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase )
lowerCAmelCase__ : Union[str, Any] = output.prev_sample
lowerCAmelCase__ : Any = torch.sum(torch.abs(__lowerCamelCase ) )
lowerCAmelCase__ : Union[str, Any] = torch.mean(torch.abs(__lowerCamelCase ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 124.77_1492_0043_9453 ) < 1e-2
assert abs(result_mean.item() - 0.1_6226_2890_1481_6284 ) < 1e-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 128.1_6633_6059_5703 ) < 1e-2
assert abs(result_mean.item() - 0.1_6688_3260_0116_7297 ) < 1e-3
else:
assert abs(result_sum.item() - 119.8_4875_4882_8125 ) < 1e-2
assert abs(result_mean.item() - 0.1560_5306_6253_6621 ) < 1e-3
def lowerCAmelCase__ (self ) -> List[Any]:
"""simple docstring"""
lowerCAmelCase__ : Any = self.scheduler_classes[0]
lowerCAmelCase__ : Tuple = self.get_scheduler_config()
lowerCAmelCase__ : str = scheduler_class(**__lowerCamelCase )
scheduler.set_timesteps(self.num_inference_steps ,device=__lowerCamelCase )
lowerCAmelCase__ : Optional[Any] = self.dummy_model()
lowerCAmelCase__ : List[Any] = self.dummy_sample_deter.to(__lowerCamelCase ) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
lowerCAmelCase__ : List[Any] = scheduler.scale_model_input(__lowerCamelCase ,__lowerCamelCase )
lowerCAmelCase__ : Any = model(__lowerCamelCase ,__lowerCamelCase )
lowerCAmelCase__ : List[Any] = scheduler.step(__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase )
lowerCAmelCase__ : List[Any] = output.prev_sample
lowerCAmelCase__ : List[str] = torch.sum(torch.abs(__lowerCamelCase ) )
lowerCAmelCase__ : Dict = torch.mean(torch.abs(__lowerCamelCase ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 167.46_9573_9746_0938 ) < 1e-2
assert abs(result_mean.item() - 0.2_1805_9346_0798_2635 ) < 1e-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 171.59_3536_3769_5312 ) < 1e-2
assert abs(result_mean.item() - 0.2_2342_9083_8241_5771 ) < 1e-3
else:
assert abs(result_sum.item() - 162.52_3834_2285_1562 ) < 1e-2
assert abs(result_mean.item() - 0.211_6195_7085_1326 ) < 1e-3
def test_full_loop_device_karras_sigmas(self ):
"""simple docstring"""
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config ,use_karras_sigmas=True )
scheduler.set_timesteps(self.num_inference_steps ,device=torch_device )
model = self.dummy_model()
sample = self.dummy_sample_deter.to(torch_device ) * scheduler.init_noise_sigma
sample = sample.to(torch_device )
for t in scheduler.timesteps:
sample = scheduler.scale_model_input(sample ,t )
model_output = model(sample ,t )
output = scheduler.step(model_output ,t ,sample )
sample = output.prev_sample
result_sum = torch.sum(torch.abs(sample ) )
result_mean = torch.mean(torch.abs(sample ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 176.66_9741_3574_2188 ) < 1e-2
assert abs(result_mean.item() - 0.2_3003_8727_3098_1811 ) < 1e-2
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 177.63_6535_6445_3125 ) < 1e-2
assert abs(result_mean.item() - 0.2_3003_8727_3098_1811 ) < 1e-2
else:
assert abs(result_sum.item() - 170.3_1352_2338_8672 ) < 1e-2
assert abs(result_mean.item() - 0.2_3003_8727_3098_1811 ) < 1e-2
| 129
|
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("""TEST_SAGEMAKER""" , """False""")) is not True , reason="""Skipping test because should only be run when releasing minor transformers version""" , )
@pytest.mark.usefixtures("""sm_env""")
@parameterized_class(
[
{
"""framework""": """pytorch""",
"""script""": """run_glue.py""",
"""model_name_or_path""": """distilbert-base-cased""",
"""instance_type""": """ml.p3.16xlarge""",
"""results""": {"""train_runtime""": 650, """eval_accuracy""": 0.7, """eval_loss""": 0.6},
},
{
"""framework""": """pytorch""",
"""script""": """run_ddp.py""",
"""model_name_or_path""": """distilbert-base-cased""",
"""instance_type""": """ml.p3.16xlarge""",
"""results""": {"""train_runtime""": 600, """eval_accuracy""": 0.7, """eval_loss""": 0.6},
},
{
"""framework""": """tensorflow""",
"""script""": """run_tf_dist.py""",
"""model_name_or_path""": """distilbert-base-cased""",
"""instance_type""": """ml.p3.16xlarge""",
"""results""": {"""train_runtime""": 600, """eval_accuracy""": 0.6, """eval_loss""": 0.7},
},
])
class MultiNodeTest(unittest.TestCase):
'''simple docstring'''
def setUp(self ):
"""simple docstring"""
if self.framework == "pytorch":
subprocess.run(
f"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() ,encoding='''utf-8''' ,check=True ,)
assert hasattr(self ,'''env''' )
def create_estimator(self ,instance_count ):
"""simple docstring"""
job_name = f"""{self.env.base_job_name}-{instance_count}-{'ddp' if 'ddp' in self.script else 'smd'}"""
# distributed data settings
distribution = {'''smdistributed''': {'''dataparallel''': {'''enabled''': True}}} if self.script != '''run_ddp.py''' else None
# creates estimator
return HuggingFace(
entry_point=self.script ,source_dir=self.env.test_path ,role=self.env.role ,image_uri=self.env.image_uri ,base_job_name=job_name ,instance_count=instance_count ,instance_type=self.instance_type ,debugger_hook_config=False ,hyperparameters={**self.env.distributed_hyperparameters, '''model_name_or_path''': self.model_name_or_path} ,metric_definitions=self.env.metric_definitions ,distribution=distribution ,py_version='''py36''' ,)
def save_results_as_csv(self ,job_name ):
"""simple docstring"""
TrainingJobAnalytics(job_name ).export_csv(f"""{self.env.test_path}/{job_name}_metrics.csv""" )
@parameterized.expand([(2,)] )
def test_script(self ,instance_count ):
"""simple docstring"""
estimator = self.create_estimator(instance_count )
# run training
estimator.fit()
# result dataframe
result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == '''eval_accuracy''']['''value'''] )
eval_loss = list(result_metrics_df[result_metrics_df.metric_name == '''eval_loss''']['''value'''] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
train_runtime = (
Session().describe_training_job(estimator.latest_training_job.name ).get('''TrainingTimeInSeconds''' ,99_99_99 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results['''eval_accuracy'''] for t in eval_accuracy )
assert all(t <= self.results['''eval_loss'''] for t in eval_loss )
# dump tests result into json file to share in PR
with open(f"""{estimator.latest_training_job.name}.json""" ,'''w''' ) as outfile:
json.dump({'''train_time''': train_runtime, '''eval_accuracy''': eval_accuracy, '''eval_loss''': eval_loss} ,outfile )
| 129
| 1
|
'''simple docstring'''
from __future__ import annotations
import matplotlib.pyplot as plt # type: ignore
import numpy
# initial triangle of Koch snowflake
VECTOR_1 = numpy.array([0, 0])
VECTOR_2 = numpy.array([0.5, 0.8660254])
VECTOR_3 = numpy.array([1, 0])
INITIAL_VECTORS = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]
def iterate(initial_vectors: list[numpy.ndarray], steps: int ) -> list[numpy.ndarray]:
'''simple docstring'''
vectors = initial_vectors
for _ in range(steps ):
vectors = iteration_step(vectors )
return vectors
def iteration_step(vectors: list[numpy.ndarray] ) -> list[numpy.ndarray]:
'''simple docstring'''
new_vectors = []
for i, start_vector in enumerate(vectors[:-1] ):
end_vector = vectors[i + 1]
new_vectors.append(start_vector )
difference_vector = end_vector - start_vector
new_vectors.append(start_vector + difference_vector / 3 )
new_vectors.append(
start_vector + difference_vector / 3 + rotate(difference_vector / 3 , 60 ) )
new_vectors.append(start_vector + difference_vector * 2 / 3 )
new_vectors.append(vectors[-1] )
return new_vectors
def rotate(vector: numpy.ndarray, angle_in_degrees: float ) -> numpy.ndarray:
'''simple docstring'''
theta = numpy.radians(angle_in_degrees )
c, s = numpy.cos(theta ), numpy.sin(theta )
rotation_matrix = numpy.array(((c, -s), (s, c)) )
return numpy.dot(rotation_matrix , vector )
def plot(vectors: list[numpy.ndarray] ) -> None:
'''simple docstring'''
axes = plt.gca()
axes.set_aspect("equal" )
# matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
# y-coordinates as inputs, which are constructed from the vector-list using
# zip()
x_coordinates, y_coordinates = zip(*vectors )
plt.plot(x_coordinates , y_coordinates )
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
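# Editorial sanity checks, not part of the original script: rotating the unit
# x-vector by 90 degrees gives the unit y-vector, and one iteration step turns
# each segment into four, i.e. 4 * (n - 1) + 1 points for n input points.
assert numpy.allclose(rotate(numpy.array([1, 0]), 90), numpy.array([0, 1]))
assert len(iteration_step(INITIAL_VECTORS)) == 4 * (len(INITIAL_VECTORS) - 1) + 1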
processed_vectors = iterate(INITIAL_VECTORS, 5)
plot(processed_vectors)
| 366
|
'''simple docstring'''
import os
from pickle import UnpicklingError
from typing import Dict, Tuple
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict, unflatten_dict
import transformers
from .utils import logging
logger = logging.get_logger(__name__)
def load_pytorch_checkpoint_in_flax_state_dict(flax_model , pytorch_checkpoint_path , is_sharded , allow_missing_keys=False ):
'''simple docstring'''
try:
import torch # noqa: F401
except ImportError:
logger.error(
"Loading a PyTorch model in Flax, requires both PyTorch and Flax to be installed. Please see"
" https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
" instructions." )
raise
if not is_sharded:
pt_path = os.path.abspath(pytorch_checkpoint_path )
logger.info(F'Loading PyTorch weights from {pt_path}' )
pt_state_dict = torch.load(pt_path , map_location="cpu" )
logger.info(F'PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values() ):,} parameters.' )
flax_state_dict = convert_pytorch_state_dict_to_flax(pt_state_dict , flax_model )
else:
# model is sharded and pytorch_checkpoint_path already contains the list of .pt shard files
flax_state_dict = convert_pytorch_sharded_state_dict_to_flax(pytorch_checkpoint_path , flax_model )
return flax_state_dict
def rename_key_and_reshape_tensor(pt_tuple_key: Tuple[str] , pt_tensor: np.ndarray , random_flax_state_dict: Dict[str, jnp.ndarray] , model_prefix: str , ) -> (Tuple[str], np.ndarray):
'''simple docstring'''
def is_key_or_prefix_key_in_dict(key: Tuple[str] ) -> bool:
return len(set(random_flax_state_dict ) & {key, (model_prefix,) + key} ) > 0
# layer norm
renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
if pt_tuple_key[-1] in ["weight", "gamma"] and is_key_or_prefix_key_in_dict(renamed_pt_tuple_key ):
return renamed_pt_tuple_key, pt_tensor
# batch norm layer mean
renamed_pt_tuple_key = pt_tuple_key[:-1] + ("mean",)
if pt_tuple_key[-1] == "running_mean" and not is_key_or_prefix_key_in_dict(pt_tuple_key ):
return renamed_pt_tuple_key, pt_tensor
# batch norm layer var
renamed_pt_tuple_key = pt_tuple_key[:-1] + ("var",)
if pt_tuple_key[-1] == "running_var" and not is_key_or_prefix_key_in_dict(pt_tuple_key ):
return renamed_pt_tuple_key, pt_tensor
# embedding
renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
if pt_tuple_key[-1] == "weight" and is_key_or_prefix_key_in_dict(renamed_pt_tuple_key ):
return renamed_pt_tuple_key, pt_tensor
# conv layer
renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4 and not is_key_or_prefix_key_in_dict(pt_tuple_key ):
pt_tensor = pt_tensor.transpose(2 , 3 , 1 , 0 )
return renamed_pt_tuple_key, pt_tensor
# linear layer
renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
if pt_tuple_key[-1] == "weight" and not is_key_or_prefix_key_in_dict(pt_tuple_key ):
pt_tensor = pt_tensor.T
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm weight
renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
if pt_tuple_key[-1] == "gamma":
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm bias
renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
if pt_tuple_key[-1] == "beta":
return renamed_pt_tuple_key, pt_tensor
# New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
name = None
if pt_tuple_key[-3::2] == ("parametrizations", "original0"):
name = pt_tuple_key[-2] + "_g"
elif pt_tuple_key[-3::2] == ("parametrizations", "original1"):
name = pt_tuple_key[-2] + "_v"
if name is not None:
renamed_pt_tuple_key = pt_tuple_key[:-3] + (name,)
return renamed_pt_tuple_key, pt_tensor
return pt_tuple_key, pt_tensor
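# Editorial illustration, not part of the original module: PyTorch stores conv
# kernels as (out_ch, in_ch, kh, kw) while Flax expects (kh, kw, in_ch, out_ch),
# which is exactly what the transpose(2, 3, 1, 0) above performs. The kernel
# shape below is an illustrative assumption.
if __name__ == "__main__":
_pt_kernel = np.zeros((64, 3, 7, 7) )
assert _pt_kernel.transpose(2 , 3 , 1 , 0 ).shape == (7, 7, 3, 64)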
def convert_pytorch_state_dict_to_flax(pt_state_dict , flax_model ):
'''simple docstring'''
# convert pytorch tensor to numpy
pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}
model_prefix = flax_model.base_model_prefix
# use params dict if the model contains batch norm layers
if "params" in flax_model.params:
flax_model_params = flax_model.params["params"]
else:
flax_model_params = flax_model.params
random_flax_state_dict = flatten_dict(flax_model_params )
# add batch_stats keys,values to dict
if "batch_stats" in flax_model.params:
flax_batch_stats = flatten_dict(flax_model.params["batch_stats"] )
random_flax_state_dict.update(flax_batch_stats )
flax_state_dict = {}
load_model_with_head_into_base_model = (model_prefix not in flax_model_params) and (
model_prefix in {k.split("." )[0] for k in pt_state_dict.keys()}
)
load_base_model_into_model_with_head = (model_prefix in flax_model_params) and (
model_prefix not in {k.split("." )[0] for k in pt_state_dict.keys()}
)
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
pt_tuple_key = tuple(pt_key.split("." ) )
# remove base model prefix if necessary
has_base_model_prefix = pt_tuple_key[0] == model_prefix
if load_model_with_head_into_base_model and has_base_model_prefix:
pt_tuple_key = pt_tuple_key[1:]
# Correctly rename weight parameters
flax_key , flax_tensor = rename_key_and_reshape_tensor(
pt_tuple_key , pt_tensor , random_flax_state_dict , model_prefix )
# add model prefix if necessary
require_base_model_prefix = (model_prefix,) + flax_key in random_flax_state_dict
if load_base_model_into_model_with_head and require_base_model_prefix:
flax_key = (model_prefix,) + flax_key
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
F'PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '
F'{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.' )
# add batch stats if the model contains batchnorm layers
if "batch_stats" in flax_model.params:
if "mean" in flax_key[-1] or "var" in flax_key[-1]:
flax_state_dict[("batch_stats",) + flax_key] = jnp.asarray(flax_tensor )
continue
# remove num_batches_tracked key
if "num_batches_tracked" in flax_key[-1]:
flax_state_dict.pop(flax_key , None )
continue
# also add unexpected weight so that warning is thrown
flax_state_dict[("params",) + flax_key] = jnp.asarray(flax_tensor )
else:
# also add unexpected weight so that warning is thrown
flax_state_dict[flax_key] = jnp.asarray(flax_tensor )
return unflatten_dict(flax_state_dict )
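# Editorial illustration, not part of the original module: flatten_dict turns a
# nested param dict into tuple keys, which is the key format the conversion
# loop above compares against. The toy dict below is an assumption.
if __name__ == "__main__":
assert flatten_dict({"encoder": {"layer_0": {"kernel": 0}}} ) == {("encoder", "layer_0", "kernel"): 0}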
def convert_pytorch_sharded_state_dict_to_flax(shard_filenames , flax_model ):
'''simple docstring'''
import torch
# Load the index
flax_state_dict = {}
for shard_file in shard_filenames:
# load using msgpack utils
pt_state_dict = torch.load(shard_file )
pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}
model_prefix = flax_model.base_model_prefix
# use params dict if the model contains batch norm layers and then add batch_stats keys,values to dict
if "batch_stats" in flax_model.params:
flax_model_params = flax_model.params["params"]
random_flax_state_dict = flatten_dict(flax_model_params )
random_flax_state_dict.update(flatten_dict(flax_model.params["batch_stats"] ) )
else:
flax_model_params = flax_model.params
random_flax_state_dict = flatten_dict(flax_model_params )
load_model_with_head_into_base_model = (model_prefix not in flax_model_params) and (
model_prefix in {k.split("." )[0] for k in pt_state_dict.keys()}
)
load_base_model_into_model_with_head = (model_prefix in flax_model_params) and (
model_prefix not in {k.split("." )[0] for k in pt_state_dict.keys()}
)
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
pt_tuple_key = tuple(pt_key.split("." ) )
# remove base model prefix if necessary
has_base_model_prefix = pt_tuple_key[0] == model_prefix
if load_model_with_head_into_base_model and has_base_model_prefix:
pt_tuple_key = pt_tuple_key[1:]
# Correctly rename weight parameters
flax_key , flax_tensor = rename_key_and_reshape_tensor(
pt_tuple_key , pt_tensor , random_flax_state_dict , model_prefix )
# add model prefix if necessary
require_base_model_prefix = (model_prefix,) + flax_key in random_flax_state_dict
if load_base_model_into_model_with_head and require_base_model_prefix:
flax_key = (model_prefix,) + flax_key
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
F'PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '
F'{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.' )
# add batch stats if the model contains batchnorm layers
if "batch_stats" in flax_model.params:
if "mean" in flax_key[-1]:
flax_state_dict[("batch_stats",) + flax_key] = jnp.asarray(flax_tensor )
continue
if "var" in flax_key[-1]:
flax_state_dict[("batch_stats",) + flax_key] = jnp.asarray(flax_tensor )
continue
# remove num_batches_tracked key
if "num_batches_tracked" in flax_key[-1]:
flax_state_dict.pop(flax_key , None )
continue
# also add unexpected weight so that warning is thrown
flax_state_dict[("params",) + flax_key] = jnp.asarray(flax_tensor )
else:
# also add unexpected weight so that warning is thrown
flax_state_dict[flax_key] = jnp.asarray(flax_tensor )
return unflatten_dict(flax_state_dict )
def load_flax_checkpoint_in_pytorch_model(model , flax_checkpoint_path ):
'''simple docstring'''
flax_checkpoint_path = os.path.abspath(flax_checkpoint_path )
logger.info(F'Loading Flax weights from {flax_checkpoint_path}' )
# import correct flax class
flax_cls = getattr(transformers , "Flax" + model.__class__.__name__ )
# load flax weight dict
with open(flax_checkpoint_path , "rb" ) as state_f:
try:
flax_state_dict = from_bytes(flax_cls , state_f.read() )
except UnpicklingError:
raise EnvironmentError(F'Unable to convert {flax_checkpoint_path} to Flax deserializable object. ' )
return load_flax_weights_in_pytorch_model(model , flax_state_dict )
def load_flax_weights_in_pytorch_model(pt_model , flax_state ):
'''simple docstring'''
try:
import torch # noqa: F401
except ImportError:
logger.error(
"Loading a Flax weights in PyTorch, requires both PyTorch and Flax to be installed. Please see"
" https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
" instructions." )
raise
# check if we have bf16 weights
is_type_bf16 = flatten_dict(jax.tree_util.tree_map(lambda params: params.dtype == jnp.bfloat16 , flax_state ) ).values()
if any(is_type_bf16 ):
# convert all weights to fp32 if the are bf16 since torch.from_numpy can-not handle bf16
# and bf16 is not fully supported in PT yet.
logger.warning(
"Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` "
"before loading those in PyTorch model." )
flax_state = jax.tree_util.tree_map(
lambda params: params.astype(np.float32 ) if params.dtype == jnp.bfloat16 else params , flax_state )
flax_state_dict = flatten_dict(flax_state )
pt_model_dict = pt_model.state_dict()
load_model_with_head_into_base_model = (pt_model.base_model_prefix in flax_state) and (
pt_model.base_model_prefix not in {k.split("." )[0] for k in pt_model_dict.keys()}
)
load_base_model_into_model_with_head = (pt_model.base_model_prefix not in flax_state) and (
pt_model.base_model_prefix in {k.split("." )[0] for k in pt_model_dict.keys()}
)
# keep track of unexpected & missing keys
unexpected_keys = []
missing_keys = set(pt_model_dict.keys() )
for flax_key_tuple, flax_tensor in flax_state_dict.items():
has_base_model_prefix = flax_key_tuple[0] == pt_model.base_model_prefix
require_base_model_prefix = ".".join((pt_model.base_model_prefix,) + flax_key_tuple ) in pt_model_dict
# adapt flax_key to prepare for loading from/to base model only
if load_model_with_head_into_base_model and has_base_model_prefix:
flax_key_tuple = flax_key_tuple[1:]
elif load_base_model_into_model_with_head and require_base_model_prefix:
flax_key_tuple = (pt_model.base_model_prefix,) + flax_key_tuple
# rename flax weights to PyTorch format
if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4 and ".".join(flax_key_tuple ) not in pt_model_dict:
# conv layer
flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
flax_tensor = jnp.transpose(flax_tensor , (3, 2, 0, 1) )
elif flax_key_tuple[-1] == "kernel" and ".".join(flax_key_tuple ) not in pt_model_dict:
# linear layer
flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
flax_tensor = flax_tensor.T
elif flax_key_tuple[-1] in ["scale", "embedding"]:
flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
# adding batch stats from flax batch norm to pt
elif "mean" in flax_key_tuple[-1]:
flax_key_tuple = flax_key_tuple[:-1] + ("running_mean",)
elif "var" in flax_key_tuple[-1]:
flax_key_tuple = flax_key_tuple[:-1] + ("running_var",)
if "batch_stats" in flax_state:
flax_key = ".".join(flax_key_tuple[1:] ) # Remove the params/batch_stats header
else:
flax_key = ".".join(flax_key_tuple )
# We also need to look at `pt_model_dict` and see if there are keys requiring further transformation.
special_pt_names = {}
# New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
for key in pt_model_dict:
key_components = key.split("." )
name = None
if key_components[-3::2] == ["parametrizations", "original0"]:
name = key_components[-2] + "_g"
elif key_components[-3::2] == ["parametrizations", "original1"]:
name = key_components[-2] + "_v"
if name is not None:
key_components = key_components[:-3] + [name]
key_to_check = ".".join(key_components )
special_pt_names[key_to_check] = key
if flax_key in special_pt_names:
flax_key = special_pt_names[flax_key]
if flax_key in pt_model_dict:
if flax_tensor.shape != pt_model_dict[flax_key].shape:
raise ValueError(
F'Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected '
F'to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.' )
else:
# add weight to pytorch dict
flax_tensor = np.asarray(flax_tensor ) if not isinstance(flax_tensor , np.ndarray ) else flax_tensor
pt_model_dict[flax_key] = torch.from_numpy(flax_tensor )
# remove from missing keys
missing_keys.remove(flax_key )
else:
# weight is not expected by PyTorch model
unexpected_keys.append(flax_key )
pt_model.load_state_dict(pt_model_dict )
# re-transform missing_keys to list
missing_keys = list(missing_keys )
if len(unexpected_keys ) > 0:
logger.warning(
"Some weights of the Flax model were not used when initializing the PyTorch model"
F' {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing'
F' {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture'
" (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This"
F' IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect'
" to be exactly identical (e.g. initializing a BertForSequenceClassification model from a"
" FlaxBertForSequenceClassification model)." )
else:
logger.warning(F'All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n' )
if len(missing_keys ) > 0:
logger.warning(
F'Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly'
F' initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to'
" use it for predictions and inference." )
else:
logger.warning(
F'All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n'
"If your task is similar to the task the model of the checkpoint was trained on, "
F'you can already use {pt_model.__class__.__name__} for predictions without further training.' )
return pt_model
| 31
| 0
|
"""simple docstring"""
import logging
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import arg_to_scheduler
from transformers import TrainingArguments
logger = logging.getLogger(__name__)
@dataclass
class Seq2SeqTrainingArguments(TrainingArguments ):
'''simple docstring'''
label_smoothing: Optional[float] = field(
default=0.0 ,metadata={"""help""": """The label smoothing epsilon to apply (if not zero)."""} )
sortish_sampler: bool = field(default=False ,metadata={"""help""": """Whether to SortishSamler or not."""} )
predict_with_generate: bool = field(
default=False ,metadata={"""help""": """Whether to use generate to calculate generative metrics (ROUGE, BLEU)."""} )
adafactor: bool = field(default=False ,metadata={"""help""": """whether to use adafactor"""} )
encoder_layerdrop: Optional[float] = field(
default=None ,metadata={"""help""": """Encoder layer dropout probability. Goes into model.config."""} )
decoder_layerdrop: Optional[float] = field(
default=None ,metadata={"""help""": """Decoder layer dropout probability. Goes into model.config."""} )
dropout: Optional[float] = field(default=None ,metadata={"""help""": """Dropout probability. Goes into model.config."""} )
attention_dropout: Optional[float] = field(
default=None ,metadata={"""help""": """Attention dropout probability. Goes into model.config."""} )
lr_scheduler: Optional[str] = field(
default="""linear""" ,metadata={"""help""": F'''Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys() )}'''} ,)
| 57
|
import inspect
import unittest
import numpy as np
from transformers import ViTConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel
class FlaxViTModelTester(unittest.TestCase ):
def __init__( self , parent , batch_size=13 , image_size=30 , patch_size=2 , num_channels=3 , is_training=True , use_labels=True , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , type_sequence_label_size=10 , initializer_range=0.02 , ):
"""simple docstring"""
self.parent = parent
self.batch_size = batch_size
self.image_size = image_size
self.patch_size = patch_size
self.num_channels = num_channels
self.is_training = is_training
self.use_labels = use_labels
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.type_sequence_label_size = type_sequence_label_size
self.initializer_range = initializer_range
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
num_patches = (image_size // patch_size) ** 2
self.seq_length = num_patches + 1
def prepare_config_and_inputs(self ):
"""simple docstring"""
pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
config = ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=False , initializer_range=self.initializer_range , )
return config, pixel_values
def create_and_check_model(self , config , pixel_values ):
"""simple docstring"""
model = FlaxViTModel(config=config )
result = model(pixel_values )
# expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)
image_size = (self.image_size, self.image_size)
patch_size = (self.patch_size, self.patch_size)
num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, num_patches + 1, self.hidden_size) )
def create_and_check_for_image_classification(self , config , pixel_values ):
"""simple docstring"""
config.num_labels = self.type_sequence_label_size
model = FlaxViTForImageClassification(config=config )
result = model(pixel_values )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
config.num_channels = 1
model = FlaxViTForImageClassification(config )
pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
result = model(pixel_values )
def prepare_config_and_inputs_for_common(self ):
"""simple docstring"""
config_and_inputs = self.prepare_config_and_inputs()
config, pixel_values = config_and_inputs
inputs_dict = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_flax
class FlaxViTModelTest(FlaxModelTesterMixin , unittest.TestCase ):
all_model_classes = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else ()
def setUp(self ):
"""simple docstring"""
self.model_tester = FlaxViTModelTester(self )
self.config_tester = ConfigTester(self , config_class=ViTConfig , has_text_modality=False , hidden_size=37 )
def test_config(self ):
"""simple docstring"""
self.config_tester.run_common_tests()
def test_model(self ):
"""simple docstring"""
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs )
def test_for_image_classification(self ):
"""simple docstring"""
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
def test_forward_signature(self ):
"""simple docstring"""
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config )
signature = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
arg_names = [*signature.parameters.keys()]
expected_arg_names = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , expected_arg_names )
def test_jit_compilation(self ):
"""simple docstring"""
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
prepared_inputs_dict = self._prepare_for_class(inputs_dict , model_class )
model = model_class(config )
@jax.jit
def model_jitted(pixel_values , **kwargs ):
return model(pixel_values=pixel_values , **kwargs )
with self.subTest("""JIT Enabled""" ):
jitted_outputs = model_jitted(**prepared_inputs_dict ).to_tuple()
with self.subTest("""JIT Disabled""" ):
with jax.disable_jit():
outputs = model_jitted(**prepared_inputs_dict ).to_tuple()
self.assertEqual(len(jitted_outputs ) , len(outputs ) )
for jitted_output, output in zip(jitted_outputs , outputs ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def test_model_from_pretrained(self ):
"""simple docstring"""
for model_class_name in self.all_model_classes:
model = model_class_name.from_pretrained("""google/vit-base-patch16-224""" )
outputs = model(np.ones((1, 3, 224, 224) ) )
self.assertIsNotNone(outputs )
| 343
| 0
|
def generate_large_matrix() -> list[list[int]]:
"""simple docstring"""
return [list(range(1000 - i, -1000 - i, -1 ) ) for i in range(1000 )]
grid = generate_large_matrix()
test_grids = (
[[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
[[3, 2], [1, 0]],
[[7, 7, 6]],
[[7, 7, 6], [-1, -2, -3]],
grid,
)
def validate_grid(grid: list[list[int]] ) -> None:
"""simple docstring"""
assert all(row == sorted(row, reverse=True ) for row in grid )
assert all(list(col ) == sorted(col, reverse=True ) for col in zip(*grid ) )
def find_negative_index(array: list[int] ) -> int:
"""simple docstring"""
left = 0
right = len(array ) - 1
# Edge cases such as no values or all numbers are negative.
if not array or array[0] < 0:
return 0
while right + 1 > left:
mid = (left + right) // 2
num = array[mid]
# Num must be negative and the index must be greater than or equal to 0.
if num < 0 and array[mid - 1] >= 0:
return mid
if num >= 0:
left = mid + 1
else:
right = mid - 1
# No negative numbers so return the last index of the array + 1 which is the length.
return len(array )
def count_negatives_binary_search(grid: list[list[int]] ) -> int:
"""simple docstring"""
total = 0
bound = len(grid[0] )
for i in range(len(grid ) ):
bound = find_negative_index(grid[i][:bound] )
total += bound
return (len(grid ) * len(grid[0] )) - total
def count_negatives_brute_force(grid: list[list[int]] ) -> int:
"""simple docstring"""
return len([number for row in grid for number in row if number < 0] )
def count_negatives_brute_force_with_break(grid: list[list[int]] ) -> int:
"""simple docstring"""
total = 0
for row in grid:
for i, number in enumerate(row ):
if number < 0:
total += len(row ) - i
break
return total
def benchmark() -> None:
"""simple docstring"""
from timeit import timeit
print('''Running benchmarks''' )
setup = (
'''from __main__ import count_negatives_binary_search, '''
'''count_negatives_brute_force, count_negatives_brute_force_with_break, grid'''
)
for func in (
"count_negatives_binary_search", # took 0.7727 seconds
"count_negatives_brute_force_with_break", # took 4.6505 seconds
"count_negatives_brute_force", # took 12.8160 seconds
):
time = timeit(f"""{func}(grid=grid)""", setup=setup, number=500 )
print(f"""{func}() took {time:0.4f} seconds""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
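# Editorial sanity checks, not part of the original script: on the row
# [4, 3, 2, -1] the first negative value sits at index 3, and the 4x4 sample
# grid above contains exactly 8 negative numbers.
assert find_negative_index([4, 3, 2, -1]) == 3
assert count_negatives_binary_search([[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]]) == 8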
| 362
|
import inspect
import os
import torch
from transformers import AutoModel
from transformers.testing_utils import mockenv_context
from transformers.trainer_utils import set_seed
import accelerate
from accelerate.accelerator import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils.testing import (
AccelerateTestCase,
TempDirTestCase,
execute_subprocess_async,
require_cuda,
require_fsdp,
require_multi_gpu,
slow,
)
from accelerate.utils.constants import (
FSDP_AUTO_WRAP_POLICY,
FSDP_BACKWARD_PREFETCH,
FSDP_SHARDING_STRATEGY,
FSDP_STATE_DICT_TYPE,
)
from accelerate.utils.dataclasses import FullyShardedDataParallelPlugin
from accelerate.utils.other import patch_environment
set_seed(42)
BERT_BASE_CASED = """bert-base-cased"""
FP16 = """fp16"""
BF16 = """bf16"""
dtypes = [FP16, BF16]
@require_fsdp
@require_cuda
class FSDPPluginIntegration(AccelerateTestCase ):
def setUp(self ):
'''simple docstring'''
super().setUp()
self.dist_env = dict(
ACCELERATE_USE_FSDP='''true''' ,MASTER_ADDR='''localhost''' ,MASTER_PORT='''10999''' ,RANK='''0''' ,LOCAL_RANK='''0''' ,WORLD_SIZE='''1''' ,)
def test_sharding_strategy(self ):
'''simple docstring'''
from torch.distributed.fsdp.fully_sharded_data_parallel import ShardingStrategy
for i, strategy in enumerate(FSDP_SHARDING_STRATEGY ):
env = self.dist_env.copy()
env['''FSDP_SHARDING_STRATEGY'''] = F"""{i + 1}"""
env['''FSDP_SHARDING_STRATEGY_NAME'''] = strategy
with mockenv_context(**env ):
fsdp_plugin = FullyShardedDataParallelPlugin()
self.assertEqual(fsdp_plugin.sharding_strategy ,ShardingStrategy(i + 1 ) )
def test_backward_prefetch(self ):
'''simple docstring'''
from torch.distributed.fsdp.fully_sharded_data_parallel import BackwardPrefetch
for i, prefetch_policy in enumerate(FSDP_BACKWARD_PREFETCH ):
env = self.dist_env.copy()
env['''FSDP_BACKWARD_PREFETCH'''] = prefetch_policy
with mockenv_context(**env ):
fsdp_plugin = FullyShardedDataParallelPlugin()
if prefetch_policy == "NO_PREFETCH":
self.assertIsNone(fsdp_plugin.backward_prefetch )
else:
self.assertEqual(fsdp_plugin.backward_prefetch ,BackwardPrefetch(i + 1 ) )
def test_state_dict_type(self ):
'''simple docstring'''
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
for i, state_dict_type in enumerate(FSDP_STATE_DICT_TYPE ):
env = self.dist_env.copy()
env['''FSDP_STATE_DICT_TYPE'''] = state_dict_type
with mockenv_context(**env ):
fsdp_plugin = FullyShardedDataParallelPlugin()
self.assertEqual(fsdp_plugin.state_dict_type ,StateDictType(i + 1 ) )
if state_dict_type == "FULL_STATE_DICT":
self.assertTrue(fsdp_plugin.state_dict_config.offload_to_cpu )
self.assertTrue(fsdp_plugin.state_dict_config.rank0_only )
def test_auto_wrap_policy(self ):
'''simple docstring'''
model = AutoModel.from_pretrained(BERT_BASE_CASED )
for policy in FSDP_AUTO_WRAP_POLICY:
env = self.dist_env.copy()
env['''FSDP_AUTO_WRAP_POLICY'''] = policy
if policy == "TRANSFORMER_BASED_WRAP":
env['''FSDP_TRANSFORMER_CLS_TO_WRAP'''] = '''BertLayer'''
elif policy == "SIZE_BASED_WRAP":
env['''FSDP_MIN_NUM_PARAMS'''] = '''2000'''
with mockenv_context(**env ):
fsdp_plugin = FullyShardedDataParallelPlugin()
fsdp_plugin.set_auto_wrap_policy(model )
if policy == "NO_WRAP":
self.assertIsNone(fsdp_plugin.auto_wrap_policy )
else:
self.assertIsNotNone(fsdp_plugin.auto_wrap_policy )
env = self.dist_env.copy()
env['''FSDP_AUTO_WRAP_POLICY'''] = '''TRANSFORMER_BASED_WRAP'''
env['''FSDP_TRANSFORMER_CLS_TO_WRAP'''] = '''T5Layer'''
with mockenv_context(**env ):
fsdp_plugin = FullyShardedDataParallelPlugin()
with self.assertRaises(Exception ) as cm:
fsdp_plugin.set_auto_wrap_policy(model )
self.assertTrue('''Could not find the transformer layer class to wrap in the model.''' in str(cm.exception ) )
env = self.dist_env.copy()
env['''FSDP_AUTO_WRAP_POLICY'''] = '''SIZE_BASED_WRAP'''
env['''FSDP_MIN_NUM_PARAMS'''] = '''0'''
with mockenv_context(**env ):
fsdp_plugin = FullyShardedDataParallelPlugin()
fsdp_plugin.set_auto_wrap_policy(model )
self.assertIsNone(fsdp_plugin.auto_wrap_policy )
def test_mixed_precision(self ):
'''simple docstring'''
from torch.distributed.fsdp.fully_sharded_data_parallel import MixedPrecision
from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler
for mp_dtype in dtypes:
env = self.dist_env.copy()
env['''ACCELERATE_MIXED_PRECISION'''] = mp_dtype
with mockenv_context(**env ):
accelerator = Accelerator()
if mp_dtype == "fp16":
dtype = torch.float16
elif mp_dtype == "bf16":
dtype = torch.bfloat16
mp_policy = MixedPrecision(param_dtype=dtype ,reduce_dtype=dtype ,buffer_dtype=dtype )
self.assertEqual(accelerator.state.fsdp_plugin.mixed_precision_policy ,mp_policy )
if mp_dtype == FP16:
self.assertTrue(isinstance(accelerator.scaler ,ShardedGradScaler ) )
elif mp_dtype == BF16:
self.assertIsNone(accelerator.scaler )
AcceleratorState._reset_state(True )
def test_cpu_offload(self ):
'''simple docstring'''
from torch.distributed.fsdp.fully_sharded_data_parallel import CPUOffload
for flag in [True, False]:
env = self.dist_env.copy()
env['''FSDP_OFFLOAD_PARAMS'''] = str(flag ).lower()
with mockenv_context(**env ):
fsdp_plugin = FullyShardedDataParallelPlugin()
self.assertEqual(fsdp_plugin.cpu_offload ,CPUOffload(offload_params=flag ) )
@require_fsdp
@require_multi_gpu
@slow
class FSDPIntegrationTest(TempDirTestCase ):
def setUp(self ):
'''simple docstring'''
super().setUp()
self.performance_lower_bound = 0.82
self.performance_configs = [
'''fsdp_shard_grad_op_transformer_based_wrap''',
'''fsdp_full_shard_transformer_based_wrap''',
]
self.peak_memory_usage_upper_bound = {
'''multi_gpu_fp16''': 3200,
'''fsdp_shard_grad_op_transformer_based_wrap_fp16''': 2000,
'''fsdp_full_shard_transformer_based_wrap_fp16''': 1900,
# Disabling below test as it overwhelms the RAM memory usage
# on CI self-hosted runner leading to tests getting killed.
# "fsdp_full_shard_cpu_offload_transformer_based_wrap_fp32": 1500, # fp16 was leading to indefinite hang
}
self.n_train = 160
self.n_val = 160
mod_file = inspect.getfile(accelerate.test_utils )
self.test_scripts_folder = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''external_deps'''] )
def test_performance(self ):
'''simple docstring'''
self.test_file_path = os.path.join(self.test_scripts_folder ,'''test_performance.py''' )
cmd = ['''accelerate''', '''launch''', '''--num_processes=2''', '''--num_machines=1''', '''--machine_rank=0''', '''--use_fsdp''']
for config in self.performance_configs:
cmd_config = cmd.copy()
for i, strategy in enumerate(FSDP_SHARDING_STRATEGY ):
if strategy.lower() in config:
cmd_config.append(F"""--fsdp_sharding_strategy={i+1}""" )
break
if "fp32" in config:
cmd_config.append('''--mixed_precision=no''' )
else:
cmd_config.append('''--mixed_precision=fp16''' )
if "cpu_offload" in config:
cmd_config.append('''--fsdp_offload_params=True''' )
for policy in FSDP_AUTO_WRAP_POLICY:
if policy.lower() in config:
cmd_config.append(F"""--fsdp_auto_wrap_policy={policy}""" )
break
if policy == "TRANSFORMER_BASED_WRAP":
cmd_config.append('''--fsdp_transformer_layer_cls_to_wrap=BertLayer''' )
elif policy == "SIZE_BASED_WRAP":
cmd_config.append('''--fsdp_min_num_params=2000''' )
cmd_config.extend(
[
self.test_file_path,
F"""--output_dir={self.tmpdir}""",
F"""--performance_lower_bound={self.performance_lower_bound}""",
] )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(cmd_config ,env=os.environ.copy() )
def test_checkpointing(self ):
'''simple docstring'''
self.test_file_path = os.path.join(self.test_scripts_folder ,'''test_checkpointing.py''' )
cmd = [
'''accelerate''',
'''launch''',
'''--num_processes=2''',
'''--num_machines=1''',
'''--machine_rank=0''',
'''--use_fsdp''',
'''--mixed_precision=fp16''',
'''--fsdp_transformer_layer_cls_to_wrap=BertLayer''',
]
for i, strategy in enumerate(FSDP_SHARDING_STRATEGY ):
cmd_config = cmd.copy()
cmd_config.append(F"""--fsdp_sharding_strategy={i+1}""" )
if strategy != "FULL_SHARD":
continue
state_dict_config_index = len(cmd_config )
for state_dict_type in FSDP_STATE_DICT_TYPE:
cmd_config = cmd_config[:state_dict_config_index]
cmd_config.append(F"""--fsdp_state_dict_type={state_dict_type}""" )
cmd_config.extend(
[
self.test_file_path,
F"""--output_dir={self.tmpdir}""",
'''--partial_train_epoch=1''',
] )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(cmd_config ,env=os.environ.copy() )
cmd_config = cmd_config[:-1]
resume_from_checkpoint = os.path.join(self.tmpdir ,'''epoch_0''' )
cmd_config.extend(
[
F"""--resume_from_checkpoint={resume_from_checkpoint}""",
] )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(cmd_config ,env=os.environ.copy() )
def test_peak_memory_usage(self ):
'''simple docstring'''
self.test_file_path = os.path.join(self.test_scripts_folder ,'''test_peak_memory_usage.py''' )
cmd = [
'''accelerate''',
'''launch''',
'''--num_processes=2''',
'''--num_machines=1''',
'''--machine_rank=0''',
]
for spec, peak_mem_upper_bound in self.peak_memory_usage_upper_bound.items():
cmd_config = cmd.copy()
if "fp16" in spec:
cmd_config.extend(['''--mixed_precision=fp16'''] )
else:
cmd_config.extend(['''--mixed_precision=no'''] )
if "multi_gpu" in spec:
continue
else:
cmd_config.extend(['''--use_fsdp'''] )
for i, strategy in enumerate(FSDP_SHARDING_STRATEGY ):
if strategy.lower() in spec:
cmd_config.append(F"""--fsdp_sharding_strategy={i+1}""" )
break
if "cpu_offload" in spec:
cmd_config.append('''--fsdp_offload_params=True''' )
for policy in FSDP_AUTO_WRAP_POLICY:
if policy.lower() in spec:
cmd_config.append(F"""--fsdp_auto_wrap_policy={policy}""" )
break
if policy == "TRANSFORMER_BASED_WRAP":
cmd_config.append('''--fsdp_transformer_layer_cls_to_wrap=BertLayer''' )
elif policy == "SIZE_BASED_WRAP":
cmd_config.append('''--fsdp_min_num_params=2000''' )
cmd_config.extend(
[
self.test_file_path,
F"""--output_dir={self.tmpdir}""",
F"""--peak_memory_upper_bound={peak_mem_upper_bound}""",
F"""--n_train={self.n_train}""",
F"""--n_val={self.n_val}""",
] )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(cmd_config ,env=os.environ.copy() )
| 330
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_xglm": ["XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XGLMConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["tokenization_xglm"] = ["XGLMTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["tokenization_xglm_fast"] = ["XGLMTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_xglm"] = [
"XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"XGLMForCausalLM",
"XGLMModel",
"XGLMPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_flax_xglm"] = [
"FlaxXGLMForCausalLM",
"FlaxXGLMModel",
"FlaxXGLMPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_tf_xglm"] = [
"TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFXGLMForCausalLM",
"TFXGLMModel",
"TFXGLMPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 85
|
'''simple docstring'''
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
class TextClassificationToolTester(unittest.TestCase , ToolTesterMixin ):
def setup(self ):
'''simple docstring'''
self.tool = load_tool("text-classification" )
self.tool.setup()
self.remote_tool = load_tool("text-classification" , remote=True )
def test_exact_match_arg(self ):
'''simple docstring'''
result = self.tool("That's quite cool" , ["positive", "negative"] )
self.assertEqual(result , "positive" )
def test_exact_match_arg_remote(self ):
'''simple docstring'''
result = self.remote_tool("That's quite cool" , ["positive", "negative"] )
self.assertEqual(result , "positive" )
def test_exact_match_kwarg(self ):
'''simple docstring'''
result = self.tool(text="That's quite cool" , labels=["positive", "negative"] )
self.assertEqual(result , "positive" )
def test_exact_match_kwarg_remote(self ):
'''simple docstring'''
result = self.remote_tool(text="That's quite cool" , labels=["positive", "negative"] )
self.assertEqual(result , "positive" )
| 85
| 1
|
"""simple docstring"""
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def data_handling(data ):
'''simple docstring'''
return (data["data"], data["target"])
def xgboost(features , target , test_features ):
'''simple docstring'''
xgb = XGBRegressor(verbosity=0 , random_state=42 )
xgb.fit(features , target )
# Predict target for test data
predictions = xgb.predict(test_features )
predictions = predictions.reshape(len(predictions ) , 1 )
return predictions
def main():
'''simple docstring'''
housing = fetch_california_housing()
data, target = data_handling(housing )
x_train, x_test, y_train, y_test = train_test_split(
data , target , test_size=0.25 , random_state=1 )
predictions = xgboost(x_train , y_train , x_test )
# Error printing
print(F"""Mean Absolute Error : {mean_absolute_error(y_test , predictions )}""" )
print(F"""Mean Square Error : {mean_squared_error(y_test , predictions )}""" )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
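# Editorial check, not part of the original script: the xgboost() helper
# returns predictions reshaped to a column vector, one row per test sample.
# The tiny synthetic regression problem below is an illustrative assumption.
_features = np.arange(20, dtype=float).reshape(10, 2)
_target = _features.sum(axis=1)
assert xgboost(_features, _target, _features).shape == (10, 1)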
| 324
|
"""simple docstring"""
import unittest
from transformers import load_tool
from transformers.utils import is_torch_available
if is_torch_available():
import torch
from transformers.testing_utils import require_torch
from .test_tools_common import ToolTesterMixin
@require_torch
class TextToSpeechToolTester(unittest.TestCase , ToolTesterMixin ):
def setup(self ):
self.tool = load_tool("""text-to-speech""" )
self.tool.setup()
def test_exact_match_arg(self ):
# SpeechT5 isn't deterministic
torch.manual_seed(0 )
result = self.tool("""hey""" )
resulting_tensor = result.to_raw()
self.assertTrue(
torch.allclose(
resulting_tensor[:3] , torch.tensor([-0.0005966668832115829, -0.0003657640190795064, -0.00013439502799883485] ) , ) )
def test_exact_match_kwarg(self ):
# SpeechT5 isn't deterministic
torch.manual_seed(0 )
result = self.tool("""hey""" )
resulting_tensor = result.to_raw()
self.assertTrue(
torch.allclose(
resulting_tensor[:3] , torch.tensor([-0.0005966668832115829, -0.0003657640190795064, -0.00013439502799883485] ) , ) )
| 324
| 1
|
from typing import Optional, Tuple, Union
import torch
from einops import rearrange, reduce
from diffusers import DDIMScheduler, DDPMScheduler, DiffusionPipeline, ImagePipelineOutput, UNet2DConditionModel
from diffusers.schedulers.scheduling_ddim import DDIMSchedulerOutput
from diffusers.schedulers.scheduling_ddpm import DDPMSchedulerOutput
BITS = 8
def decimal_to_bits(x , bits=BITS ):
"""simple docstring"""
device = x.device
x = (x * 255).int().clamp(0 , 255 )
mask = 2 ** torch.arange(bits - 1 , -1 , -1 , device=device )
mask = rearrange(mask , 'd -> d 1 1' )
x = rearrange(x , 'b c h w -> b c 1 h w' )
bits = ((x & mask) != 0).float()
bits = rearrange(bits , 'b c d h w -> b (c d) h w' )
bits = bits * 2 - 1
return bits
def bits_to_decimal(x , bits=BITS ):
"""simple docstring"""
device = x.device
x = (x > 0).int()
mask = 2 ** torch.arange(bits - 1 , -1 , -1 , device=device , dtype=torch.int32 )
mask = rearrange(mask , 'd -> d 1 1' )
x = rearrange(x , 'b (c d) h w -> b c d h w' , d=8 )
dec = reduce(x * mask , 'b c d h w -> b c h w' , 'sum' )
return (dec / 255).clamp(0.0 , 1.0 )
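# Editorial sanity sketch, not part of the original pipeline: the two helpers
# above should round-trip an 8-bit-quantized image tensor exactly. The shape
# below is an illustrative assumption.
if __name__ == "__main__":
_x = torch.rand(1 , 3 , 8 , 8 )
_quantized = (_x * 255).int().float() / 255
assert torch.allclose(bits_to_decimal(decimal_to_bits(_x ) ) , _quantized , atol=1e-6 )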
def ddim_bit_scheduler_step(self , model_output: torch.FloatTensor , timestep: int , sample: torch.FloatTensor , eta: float = 0.0 , use_clipped_model_output: bool = True , generator=None , return_dict: bool = True , ):
"""simple docstring"""
if self.num_inference_steps is None:
raise ValueError(
'Number of inference steps is \'None\', you need to run \'set_timesteps\' after creating the scheduler' )
# See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf
# Ideally, read DDIM paper in-detail understanding
# Notation (<variable name> -> <name in paper>
# - pred_noise_t -> e_theta(x_t, t)
# - pred_original_sample -> f_theta(x_t, t) or x_0
# - std_dev_t -> sigma_t
# - eta -> η
# - pred_sample_direction -> "direction pointing to x_t"
# - pred_prev_sample -> "x_t-1"
# 1. get previous step value (=t-1)
prev_timestep = timestep - self.config.num_train_timesteps // self.num_inference_steps
# 2. compute alphas, betas
alpha_prod_t = self.alphas_cumprod[timestep]
alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod
beta_prod_t = 1 - alpha_prod_t
# 3. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
# 4. Clip "predicted x_0"
scale = self.bit_scale
if self.config.clip_sample:
pred_original_sample = torch.clamp(pred_original_sample , -scale , scale )
# 5. compute variance: "sigma_t(η)" -> see formula (16)
# σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1)
variance = self._get_variance(timestep , prev_timestep )
std_dev_t = eta * variance ** 0.5
if use_clipped_model_output:
# the model_output is always re-derived from the clipped x_0 in Glide
model_output = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5
# 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
pred_sample_direction = (1 - alpha_prod_t_prev - std_dev_t**2) ** 0.5 * model_output
# 7. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
prev_sample = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction
if eta > 0:
# randn_like does not support generator https://github.com/pytorch/pytorch/issues/27072
device = model_output.device if torch.is_tensor(model_output ) else 'cpu'
noise = torch.randn(model_output.shape , dtype=model_output.dtype , generator=generator ).to(device )
variance = self._get_variance(timestep , prev_timestep ) ** 0.5 * eta * noise
prev_sample = prev_sample + variance
if not return_dict:
return (prev_sample,)
return DDIMSchedulerOutput(prev_sample=prev_sample , pred_original_sample=pred_original_sample )
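# Editorial check, not part of the original file: the "predicted x_0" formula
# above inverts the forward-noising identity
# x_t = sqrt(a_t) * x_0 + sqrt(1 - a_t) * eps. The tensors below are
# illustrative assumptions.
if __name__ == "__main__":
_a_t = torch.tensor(0.9 )
_x0, _eps = torch.randn(4 ), torch.randn(4 )
_xt = _a_t ** 0.5 * _x0 + (1 - _a_t) ** 0.5 * _eps
assert torch.allclose((_xt - (1 - _a_t) ** 0.5 * _eps) / _a_t ** 0.5 , _x0 , atol=1e-6 )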
def ddpm_bit_scheduler_step(self , model_output: torch.FloatTensor , timestep: int , sample: torch.FloatTensor , prediction_type="epsilon" , generator=None , return_dict: bool = True , ):
"""simple docstring"""
t = timestep
if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]:
model_output , predicted_variance = torch.split(model_output , sample.shape[1] , dim=1 )
else:
predicted_variance = None
# 1. compute alphas, betas
alpha_prod_t = self.alphas_cumprod[t]
alpha_prod_t_prev = self.alphas_cumprod[t - 1] if t > 0 else self.one
beta_prod_t = 1 - alpha_prod_t
beta_prod_t_prev = 1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if prediction_type == "epsilon":
pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif prediction_type == "sample":
pred_original_sample = model_output
else:
raise ValueError(f'''Unsupported prediction_type {prediction_type}.''' )
# 3. Clip "predicted x_0"
scale = self.bit_scale
if self.config.clip_sample:
pred_original_sample = torch.clamp(pred_original_sample , -scale , scale )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * self.betas[t]) / beta_prod_t
current_sample_coeff = self.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
variance = 0
if t > 0:
noise = torch.randn(
model_output.size() , dtype=model_output.dtype , layout=model_output.layout , generator=generator ).to(model_output.device )
variance = (self._get_variance(t , predicted_variance=predicted_variance ) ** 0.5) * noise
pred_prev_sample = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample,)
return DDPMSchedulerOutput(prev_sample=pred_prev_sample , pred_original_sample=pred_original_sample )
class BitDiffusion(DiffusionPipeline):
    def __init__(
        self,
        unet: UNetaDConditionModel,
        scheduler: Union[DDIMScheduler, DDPMScheduler],
        bit_scale: Optional[float] = 1.0,
    ):
        super().__init__()
        self.bit_scale = bit_scale
        # swap in the bit-aware step function matching the scheduler type
        scheduler.step = (
            ddim_bit_scheduler_step if isinstance(scheduler, DDIMScheduler) else ddpm_bit_scheduler_step
        )
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        height: Optional[int] = 256,
        width: Optional[int] = 256,
        num_inference_steps: Optional[int] = 50,
        generator: Optional[torch.Generator] = None,
        batch_size: Optional[int] = 1,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[Tuple, ImagePipelineOutput]:
        latents = torch.randn(
            (batch_size, self.unet.config.in_channels, height, width),
            generator=generator,
        )
        latents = decimal_to_bits(latents) * self.bit_scale
        latents = latents.to(self.device)

        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # predict the noise residual
            noise_pred = self.unet(latents, t).sample

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents).prev_sample

        image = bits_to_decimal(latents)

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
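# Usage sketch (an illustrative example, not part of the original pipeline file):
# it assumes `decimal_to_bits`/`bits_to_decimal` are defined earlier in this module
# and that a bit-diffusion UNet checkpoint exists at the placeholder path below.
#
#   scheduler = DDIMScheduler()
#   unet = UNetaDConditionModel.from_pretrained("path/to/bit-diffusion-unet")  # placeholder
#   pipe = BitDiffusion(unet, scheduler, bit_scale=1.0)
#   image = pipe(height=256, width=256, num_inference_steps=50).images[0]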
| 322
|
from collections import deque
from .hash_table import HashTable
class HashTableWithLinkedList(HashTable):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def _set_value(self, key, data):
        # each slot holds a deque so several values can share one bucket
        self.values[key] = deque([]) if self.values[key] is None else self.values[key]
        self.values[key].appendleft(data)
        self._keys[key] = self.values[key]

    def balanced_factor(self):
        return (
            sum(self.charge_factor - len(slot) for slot in self.values)
            / self.size_table
            * self.charge_factor
        )

    def _collision_resolution(self, key, data=None):
        if not (
            len(self.values[key]) == self.charge_factor and self.values.count(None) == 0
        ):
            return key
        return super()._collision_resolution(key, data)
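# Usage sketch (illustrative; assumes the sibling `hash_table.py` exposes an
# `insert_data` method and a `charge_factor` constructor argument, as the
# references above imply):
#
#   ht = HashTableWithLinkedList(3, charge_factor=2)
#   for value in (10, 20, 30):
#       ht.insert_data(value)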
| 277
| 0
|
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision.transforms import functional as F
from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''', F'''encoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(F'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', F'''encoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.weight''', F'''encoder.layers.{i}.fc1.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.bias''', F'''encoder.layers.{i}.fc1.bias'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.weight''', F'''encoder.layers.{i}.fc2.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.bias''', F'''encoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(F'''transformer.encoder.layers.{i}.norm1.weight''', F'''encoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((F'''transformer.encoder.layers.{i}.norm1.bias''', F'''encoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.weight''', F'''encoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.bias''', F'''encoder.layers.{i}.final_layer_norm.bias'''))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(F'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', F'''decoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', F'''decoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.multihead_attn.out_proj.weight''',
F'''decoder.layers.{i}.encoder_attn.out_proj.weight''',
)
)
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.multihead_attn.out_proj.bias''',
F'''decoder.layers.{i}.encoder_attn.out_proj.bias''',
)
)
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.weight''', F'''decoder.layers.{i}.fc1.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.bias''', F'''decoder.layers.{i}.fc1.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.weight''', F'''decoder.layers.{i}.fc2.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.bias''', F'''decoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm1.weight''', F'''decoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.norm1.bias''', F'''decoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.weight''', F'''decoder.layers.{i}.encoder_attn_layer_norm.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.bias''', F'''decoder.layers.{i}.encoder_attn_layer_norm.bias''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.weight''', F'''decoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.bias''', F'''decoder.layers.{i}.final_layer_norm.bias'''))
# convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
('''input_proj.weight''', '''input_projection.weight'''),
('''input_proj.bias''', '''input_projection.bias'''),
('''query_embed.weight''', '''query_position_embeddings.weight'''),
('''transformer.encoder.norm.weight''', '''encoder.layernorm.weight'''),
('''transformer.encoder.norm.bias''', '''encoder.layernorm.bias'''),
('''transformer.decoder.norm.weight''', '''decoder.layernorm.weight'''),
('''transformer.decoder.norm.bias''', '''decoder.layernorm.bias'''),
('''class_embed.weight''', '''class_labels_classifier.weight'''),
('''class_embed.bias''', '''class_labels_classifier.bias'''),
('''bbox_embed.layers.0.weight''', '''bbox_predictor.layers.0.weight'''),
('''bbox_embed.layers.0.bias''', '''bbox_predictor.layers.0.bias'''),
('''bbox_embed.layers.1.weight''', '''bbox_predictor.layers.1.weight'''),
('''bbox_embed.layers.1.bias''', '''bbox_predictor.layers.1.bias'''),
('''bbox_embed.layers.2.weight''', '''bbox_predictor.layers.2.weight'''),
('''bbox_embed.layers.2.bias''', '''bbox_predictor.layers.2.bias'''),
]
)
def rename_key(state_dict, old, new):
    val = state_dict.pop(old)
    state_dict[new] = val
def rename_backbone_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value

    return new_state_dict
def read_in_q_k_v(state_dict):
    prefix = ""

    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight"
        )
        in_proj_bias_cross_attn = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.weight"] = in_proj_weight_cross_attn[:256, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.bias"] = in_proj_bias_cross_attn[:256]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.weight"] = in_proj_weight_cross_attn[256:512, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.bias"] = in_proj_bias_cross_attn[256:512]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.weight"] = in_proj_weight_cross_attn[-256:, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.bias"] = in_proj_bias_cross_attn[-256:]
def resize(image, checkpoint_url):
    width, height = image.size
    current_max_size = max(width, height)
    target_max_size = 800 if "detection" in checkpoint_url else 1000
    scale = target_max_size / current_max_size
    resized_image = image.resize((int(round(scale * width)), int(round(scale * height))))

    return resized_image


def normalize(image):
    image = F.to_tensor(image)
    image = F.normalize(image, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    return image
@torch.no_grad()
def convert_table_transformer_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub):
    logger.info("Converting model...")

    # load original state dict
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")

    # rename keys
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "model."
    for key in state_dict.copy().keys():
        if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
            val = state_dict.pop(key)
            state_dict[prefix + key] = val

    # create HuggingFace model and load state dict
    config = TableTransformerConfig(
        backbone="resnet18",
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        ce_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.4,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
    )

    if "detection" in checkpoint_url:
        config.num_queries = 15
        config.num_labels = 2
        id2label = {0: "table", 1: "table rotated"}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    else:
        config.num_queries = 125
        config.num_labels = 6
        id2label = {
            0: "table",
            1: "table column",
            2: "table row",
            3: "table column header",
            4: "table projected row header",
            5: "table spanning cell",
        }
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    image_processor = DetrImageProcessor(
        format="coco_detection", max_size=800 if "detection" in checkpoint_url else 1000
    )
    model = TableTransformerForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()

    # verify our conversion
    filename = "example_pdf.png" if "detection" in checkpoint_url else "example_table.png"
    file_path = hf_hub_download(repo_id="nielsr/example-pdf", repo_type="dataset", filename=filename)
    image = Image.open(file_path).convert("RGB")
    pixel_values = normalize(resize(image, checkpoint_url)).unsqueeze(0)

    outputs = model(pixel_values)

    if "detection" in checkpoint_url:
        expected_shape = (1, 15, 3)
        expected_logits = torch.tensor(
            [[-6.7897, -16.9985, 6.7937], [-8.0186, -22.2192, 6.9677], [-7.3117, -21.0708, 7.4055]]
        )
        expected_boxes = torch.tensor([[0.4867, 0.1767, 0.6732], [0.6718, 0.4479, 0.3830], [0.4716, 0.1760, 0.6364]])
    else:
        expected_shape = (1, 125, 7)
        expected_logits = torch.tensor(
            [[-18.1430, -8.3214, 4.8274], [-18.4685, -7.1361, -4.2667], [-26.3693, -9.3429, -4.9962]]
        )
        expected_boxes = torch.tensor([[0.4983, 0.5595, 0.9440], [0.4916, 0.6315, 0.5954], [0.6108, 0.8637, 0.1135]])

    assert outputs.logits.shape == expected_shape
    assert torch.allclose(outputs.logits[0, :3, :3], expected_logits, atol=1e-4)
    assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Push model to HF hub
        logger.info("Pushing model to the hub...")
        model_name = (
            "microsoft/table-transformer-detection"
            if "detection" in checkpoint_url
            else "microsoft/table-transformer-structure-recognition"
        )
        model.push_to_hub(model_name)
        image_processor.push_to_hub(model_name)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_url''',
default='''https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth''',
type=str,
choices=[
'''https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth''',
'''https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth''',
],
help='''URL of the Table Transformer checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
    args = parser.parse_args()
convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
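# Example invocation (the script filename below is illustrative; the dump folder is a placeholder):
#   python convert_table_transformer_checkpoint.py \
#       --checkpoint_url https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth \
#       --pytorch_dump_folder_path ./table-transformer-detection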
| 351
|
from ..utils import is_flax_available, is_torch_available
if is_torch_available():
from .autoencoder_kl import AutoencoderKL
from .controlnet import ControlNetModel
from .dual_transformer_ad import DualTransformeraDModel
from .modeling_utils import ModelMixin
from .prior_transformer import PriorTransformer
from .ta_film_transformer import TaFilmDecoder
from .transformer_ad import TransformeraDModel
    from .unet_ad import UNetaDModel
    from .unet_ad_condition import UNetaDConditionModel
from .vq_model import VQModel
if is_flax_available():
from .controlnet_flax import FlaxControlNetModel
from .unet_ad_condition_flax import FlaxUNetaDConditionModel
from .vae_flax import FlaxAutoencoderKL
| 70
| 0
|
import inspect
import os
import re
from transformers.configuration_utils import PretrainedConfig
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_attributes.py
PATH_TO_TRANSFORMERS = "src/transformers"

# This is to make sure the transformers module imported is the one in the repo.
transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)
CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING

SPECIAL_CASES_TO_ALLOW = {
# used to compute the property `self.chunk_length`
'''EncodecConfig''': ['''overlap'''],
# used as `self.bert_model = BertModel(config, ...)`
'''DPRConfig''': True,
# not used in modeling files, but it's an important information
'''FSMTConfig''': ['''langs'''],
# used internally in the configuration class file
'''GPTNeoConfig''': ['''attention_types'''],
# used internally in the configuration class file
'''EsmConfig''': ['''is_folding_model'''],
# used during training (despite we don't have training script for these models yet)
'''Mask2FormerConfig''': ['''ignore_value'''],
# `ignore_value` used during training (despite we don't have training script for these models yet)
# `norm` used in conversion script (despite not using in the modeling file)
'''OneFormerConfig''': ['''ignore_value''', '''norm'''],
# used during preprocessing and collation, see `collating_graphormer.py`
'''GraphormerConfig''': ['''spatial_pos_max'''],
# used internally in the configuration class file
'''T5Config''': ['''feed_forward_proj'''],
# used internally in the configuration class file
# `tokenizer_class` get default value `T5Tokenizer` intentionally
'''MT5Config''': ['''feed_forward_proj''', '''tokenizer_class'''],
'''UMT5Config''': ['''feed_forward_proj''', '''tokenizer_class'''],
# used internally in the configuration class file
'''LongT5Config''': ['''feed_forward_proj'''],
# used internally in the configuration class file
'''SwitchTransformersConfig''': ['''feed_forward_proj'''],
# having default values other than `1e-5` - we can't fix them without breaking
'''BioGptConfig''': ['''layer_norm_eps'''],
# having default values other than `1e-5` - we can't fix them without breaking
'''GLPNConfig''': ['''layer_norm_eps'''],
# having default values other than `1e-5` - we can't fix them without breaking
'''SegformerConfig''': ['''layer_norm_eps'''],
# having default values other than `1e-5` - we can't fix them without breaking
'''CvtConfig''': ['''layer_norm_eps'''],
# having default values other than `1e-5` - we can't fix them without breaking
'''PerceiverConfig''': ['''layer_norm_eps'''],
# used internally to calculate the feature size
'''InformerConfig''': ['''num_static_real_features''', '''num_time_features'''],
# used internally to calculate the feature size
'''TimeSeriesTransformerConfig''': ['''num_static_real_features''', '''num_time_features'''],
# used internally to calculate the feature size
'''AutoformerConfig''': ['''num_static_real_features''', '''num_time_features'''],
# used internally to calculate `mlp_dim`
'''SamVisionConfig''': ['''mlp_ratio'''],
# For (head) training, but so far not implemented
'''ClapAudioConfig''': ['''num_classes'''],
# Not used, but providing useful information to users
'''SpeechT5HifiGanConfig''': ['''sampling_rate'''],
}
# TODO (ydshieh): Check the failing cases, try to fix them or move some cases to the above block once we are sure
SPECIAL_CASES_TO_ALLOW.update(
{
'''CLIPSegConfig''': True,
'''DeformableDetrConfig''': True,
'''DetaConfig''': True,
'''DinatConfig''': True,
'''DonutSwinConfig''': True,
'''EfficientFormerConfig''': True,
'''FSMTConfig''': True,
'''JukeboxConfig''': True,
'''LayoutLMv2Config''': True,
'''MaskFormerSwinConfig''': True,
'''MT5Config''': True,
'''NatConfig''': True,
'''OneFormerConfig''': True,
'''PerceiverConfig''': True,
'''RagConfig''': True,
'''SpeechT5Config''': True,
'''SwinConfig''': True,
'''Swin2SRConfig''': True,
'''Swinv2Config''': True,
'''SwitchTransformersConfig''': True,
'''TableTransformerConfig''': True,
'''TapasConfig''': True,
'''TransfoXLConfig''': True,
'''UniSpeechConfig''': True,
'''UniSpeechSatConfig''': True,
'''WavLMConfig''': True,
'''WhisperConfig''': True,
# TODO: @Arthur (for `alignment_head` and `alignment_layer`)
'''JukeboxPriorConfig''': True,
# TODO: @Younes (for `is_decoder`)
'''Pix2StructTextConfig''': True,
}
)
def check_attribute_being_used(config_class, attributes, default_value, source_strings):
    """Check whether any name in `attributes` is used in one of the modeling source strings."""
    attribute_used = False
    for attribute in attributes:
        for modeling_source in source_strings:
            # check if we can find `config.xxx`, `getattr(config, "xxx", ...)` or `getattr(self.config, "xxx", ...)`
            if (
                f"config.{attribute}" in modeling_source
                or f'getattr(config, "{attribute}"' in modeling_source
                or f'getattr(self.config, "{attribute}"' in modeling_source
            ):
                attribute_used = True
            # Deal with multi-line cases
            elif (
                re.search(
                    rf'getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*"{attribute}"',
                    modeling_source,
                )
                is not None
            ):
                attribute_used = True
            # `SequenceSummary` is called with `SequenceSummary(config)`
            elif attribute in [
                "summary_type",
                "summary_use_proj",
                "summary_activation",
                "summary_last_dropout",
                "summary_proj_to_labels",
                "summary_first_dropout",
            ]:
                if "SequenceSummary" in modeling_source:
                    attribute_used = True
            if attribute_used:
                break
        if attribute_used:
            break

    # common and important attributes, even if they do not always appear in the modeling files
    attributes_to_allow = [
        "bos_index",
        "eos_index",
        "pad_index",
        "unk_index",
        "mask_index",
        "image_size",
        "use_cache",
        "out_features",
        "out_indices",
    ]
    attributes_used_in_generation = ["encoder_no_repeat_ngram_size"]

    # Special cases to be allowed
    case_allowed = True
    if not attribute_used:
        case_allowed = False
        for attribute in attributes:
            # Allow if the default value in the configuration class is different from the one in `PretrainedConfig`
            if attribute in ["is_encoder_decoder"] and default_value is True:
                case_allowed = True
            elif attribute in ["tie_word_embeddings"] and default_value is False:
                case_allowed = True
            # Allow cases without checking the default value in the configuration class
            elif attribute in attributes_to_allow + attributes_used_in_generation:
                case_allowed = True
            elif attribute.endswith("_token_id"):
                case_allowed = True
            # configuration class specific cases
            if not case_allowed:
                allowed_cases = SPECIAL_CASES_TO_ALLOW.get(config_class.__name__, [])
                case_allowed = allowed_cases is True or attribute in allowed_cases

    return attribute_used or case_allowed
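# Illustration of the multi-line case handled above (a hand-written example, not from
# the modeling files): the `re.search` branch matches `getattr` calls split across
# lines, e.g.
#
#     hidden = getattr(
#         self.config, "hidden_size"
#     )
#
# while the plain substring checks only catch single-line usages such as
# `config.hidden_size` or `getattr(config, "hidden_size", None)`.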
def check_config_attributes_being_used(config_class):
    """Return the sorted list of `__init__` parameters of `config_class` that are unused in the modeling files."""
    signature = dict(inspect.signature(config_class.__init__).parameters)
    parameter_names = [x for x in list(signature.keys()) if x not in ["self", "kwargs"]]
    parameter_defaults = [signature[param].default for param in parameter_names]

    # If `attribute_map` exists, an attribute can have different names to be used in the modeling files, and as long
    # as one variant is used, the test should pass
    reversed_attribute_map = {}
    if len(config_class.attribute_map) > 0:
        reversed_attribute_map = {v: k for k, v in config_class.attribute_map.items()}

    # Get the path to modeling source files
    config_source_file = inspect.getsourcefile(config_class)
    model_dir = os.path.dirname(config_source_file)
    # Let's check against all frameworks: as long as one framework uses an attribute, we are good.
    modeling_paths = [os.path.join(model_dir, fn) for fn in os.listdir(model_dir) if fn.startswith("modeling_")]

    # Get the source code strings
    modeling_sources = []
    for path in modeling_paths:
        if os.path.isfile(path):
            with open(path) as fp:
                modeling_sources.append(fp.read())

    unused_attributes = []
    for config_param, default_value in zip(parameter_names, parameter_defaults):
        # `attributes` here is all the variant names for `config_param`
        attributes = [config_param]
        # some configuration classes have non-empty `attribute_map`, and both names could be used in the
        # corresponding modeling files. As long as one of them appears, it is fine.
        if config_param in reversed_attribute_map:
            attributes.append(reversed_attribute_map[config_param])

        if not check_attribute_being_used(config_class, attributes, default_value, modeling_sources):
            unused_attributes.append(attributes[0])

    return sorted(unused_attributes)
def check_config_attributes():
    """Raise if any configuration class declares attributes never used by its modeling files."""
    configs_with_unused_attributes = {}
    for _config_class in list(CONFIG_MAPPING.values()):
        # Skip deprecated models
        if "models.deprecated" in _config_class.__module__:
            continue
        # Some config classes are not in `CONFIG_MAPPING` (e.g. `CLIPVisionConfig`, `Blip2VisionConfig`, etc.)
        config_classes_in_module = [
            cls
            for name, cls in inspect.getmembers(
                inspect.getmodule(_config_class),
                lambda x: inspect.isclass(x)
                and issubclass(x, PretrainedConfig)
                and inspect.getmodule(x) == inspect.getmodule(_config_class),
            )
        ]
        for config_class in config_classes_in_module:
            unused_attributes = check_config_attributes_being_used(config_class)
            if len(unused_attributes) > 0:
                configs_with_unused_attributes[config_class.__name__] = unused_attributes

    if len(configs_with_unused_attributes) > 0:
        error = "The following configuration classes contain unused attributes in the corresponding modeling files:\n"
        for name, attributes in configs_with_unused_attributes.items():
            error += f"{name}: {attributes}\n"
        raise ValueError(error)
if __name__ == "__main__":
check_config_attributes()
| 39
|
def get_set_bits_count(number: int) -> int:
    """Count the set bits (1s) in `number` using Brian Kernighan's method."""
    if not isinstance(number, int) or number < 0:
        raise ValueError("Input must be a non-negative integer")
    count = 0
    while number:
        # This way we arrive at next set bit (next 1) instead of looping
        # through each bit and checking for 1s hence the
        # loop won't run 32 times it will only run the number of `1` times
        number &= number - 1
        count += 1
    return count
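# Worked example (hand-checked): 25 is 0b11001; each iteration clears the lowest set
# bit, 0b11001 -> 0b11000 -> 0b10000 -> 0, so get_set_bits_count(25) returns 3 after
# exactly three loop iterations.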
if __name__ == "__main__":
import doctest
doctest.testmod()
| 306
| 0
|
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, MBartConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFMBartForConditionalGeneration, TFMBartModel
@require_tf
class TFMBartModelTester:
    config_cls = MBartConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_mbart_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFMBartModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()
        past_key_values = past_key_values[1]
def prepare_mbart_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
@require_tf
class TFMBartModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else ()
    all_generative_model_classes = (TFMBartForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFMBartForConditionalGeneration,
            "feature-extraction": TFMBartModel,
            "summarization": TFMBartForConditionalGeneration,
            "text2text-generation": TFMBartForConditionalGeneration,
            "translation": TFMBartForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name != "FeatureExtractionPipelineTests":
            # Exception encountered when calling layer '...'
            return True

        return False

    def setUp(self):
        self.model_tester = TFMBartModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MBartConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_sentencepiece
@require_tokenizers
@require_tf
class TFMBartModelIntegrationTest(unittest.TestCase):
    src_text = [
        " UN Chief Says There Is No Military Solution in Syria",
    ]
    expected_text = [
        "Şeful ONU declară că nu există o soluţie militară în Siria",
    ]
    model_name = "facebook/mbart-large-en-ro"

    @cached_property
    def tokenizer(self):
        return AutoTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        model = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name)
        return model

    def _assert_generated_batch_equal_expected(self, **tokenizer_kwargs):
        generated_words = self.translate_src_text(**tokenizer_kwargs)
        self.assertListEqual(self.expected_text, generated_words)

    def translate_src_text(self, **tokenizer_kwargs):
        model_inputs = self.tokenizer(self.src_text, **tokenizer_kwargs, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2
        )
        generated_words = self.tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
        return generated_words

    @slow
    def test_batch_generation_en_ro(self):
        self._assert_generated_batch_equal_expected()
| 366
|
'''simple docstring'''
import os
import re
import warnings
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
if TYPE_CHECKING:
from ...tokenization_utils_base import TextInput
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''t5-small''': '''https://huggingface.co/t5-small/resolve/main/spiece.model''',
'''t5-base''': '''https://huggingface.co/t5-base/resolve/main/spiece.model''',
'''t5-large''': '''https://huggingface.co/t5-large/resolve/main/spiece.model''',
'''t5-3b''': '''https://huggingface.co/t5-3b/resolve/main/spiece.model''',
'''t5-11b''': '''https://huggingface.co/t5-11b/resolve/main/spiece.model''',
}
}
# TODO(PVP) - this should be removed in Transformers v5
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "t5-small": 512,
    "t5-base": 512,
    "t5-large": 512,
    "t5-3b": 512,
    "t5-11b": 512,
}
SPIECE_UNDERLINE = "▁"
class TaTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
def __init__(self , lowercase , lowercase="</s>" , lowercase="<unk>" , lowercase="<pad>" , lowercase=100 , lowercase=None , lowercase = None , lowercase=True , **lowercase , ):
# Add extra_ids to the special token list
if extra_ids > 0 and additional_special_tokens is None:
A_ : Any = [F'<extra_id_{i}>' for i in range(lowercase )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra_id special tokens
A_ : Tuple = len(set(filter(lambda lowercase : bool("""extra_id""" in str(lowercase ) ) , lowercase ) ) )
if extra_tokens != extra_ids:
raise ValueError(
F'Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are'
""" provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"""
""" tokens""" )
if legacy:
logger.warning_once(
F'You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. We recommend you to'
""" read the related pull request available at https://github.com/huggingface/transformers/pull/24565""" )
A_ : str = legacy
A_ : Dict = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=lowercase , unk_token=lowercase , pad_token=lowercase , extra_ids=lowercase , additional_special_tokens=lowercase , sp_model_kwargs=self.sp_model_kwargs , legacy=lowercase , **lowercase , )
A_ : List[str] = vocab_file
A_ : Tuple = extra_ids
A_ : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(lowercase )
@staticmethod
    def _eventually_correct_t5_max_length(pretrained_model_name_or_path, max_model_length, init_max_model_length):
        if pretrained_model_name_or_path in TaTokenizer.max_model_input_sizes:
            deprecated_max_model_length = TaTokenizer.max_model_input_sizes[pretrained_model_name_or_path]
            if init_max_model_length is not None and init_max_model_length != max_model_length:
                return init_max_model_length
            elif init_max_model_length is None:
                warnings.warn(
                    "This tokenizer was incorrectly instantiated with a model max length of"
                    f" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this"
                    " behavior is kept to avoid breaking backwards compatibility when padding/encoding with"
                    " `truncation is True`.\n- Be aware that you SHOULD NOT rely on"
                    f" {pretrained_model_name_or_path} automatically truncating your input to"
                    f" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences"
                    f" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with"
                    " `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please"
                    " instantiate this tokenizer with `model_max_length` set to your preferred value.",
                    FutureWarning,
                )

        return max_model_length
@property
    def vocab_size(self):
        return self.sp_model.get_piece_size() + self._extra_ids

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        # normal case: some special tokens
        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + [1]
        return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
    def get_sentinel_tokens(self):
        return list(
            set(filter(lambda x: bool(re.search(r"<extra_id_\d+>", x)) is not None, self.additional_special_tokens))
        )

    def get_sentinel_token_ids(self):
        return [self._convert_token_to_id(token) for token in self.get_sentinel_tokens()]
    def _add_eos_if_not_present(self, token_ids: List[int]) -> List[int]:
        """Do not add eos again if the user already added it."""
        if len(token_ids) > 0 and token_ids[-1] == self.eos_token_id:
            warnings.warn(
                f"This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated"
                " eos tokens being added."
            )
            return token_ids
        else:
            return token_ids + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        eos = [self.eos_token_id]

        if token_ids_1 is None:
            return len(token_ids_0 + eos) * [0]
        return len(token_ids_0 + eos + token_ids_1 + eos) * [0]

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        token_ids_0 = self._add_eos_if_not_present(token_ids_0)
        if token_ids_1 is None:
            return token_ids_0
        else:
            token_ids_1 = self._add_eos_if_not_present(token_ids_1)
            return token_ids_0 + token_ids_1
    def __getstate__(self):
        state = self.__dict__.copy()
        # the SentencePiece processor is not picklable; it is re-loaded in __setstate__
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def tokenize(self, text: "TextInput", **kwargs) -> List[str]:
        # Replace the SPIECE_UNDERLINE with a space to make sure SPIECE_UNDERLINE is only used at
        # the beginning of the text
        if not self.legacy:
            text = SPIECE_UNDERLINE + text.replace(SPIECE_UNDERLINE, " ")
        return super().tokenize(text, **kwargs)

    def _tokenize(self, text, **kwargs):
        if not self.legacy:
            is_first = text.startswith(SPIECE_UNDERLINE)
            if is_first:
                text = text[1:]

        tokens = self.sp_model.encode(text, out_type=str)

        if not self.legacy and not is_first and not text.startswith(" ") and tokens[0].startswith(SPIECE_UNDERLINE):
            tokens = ([tokens[0][1:]] if len(tokens[0]) > 1 else []) + tokens[1:]
        return tokens
    def _convert_token_to_id(self, token):
        """Converts a token (str) into an id using the vocab."""
        if token.startswith("<extra_id_"):
            match = re.match(r"<extra_id_(\d+)>", token)
            num = int(match.group(1))
            return self.vocab_size - num - 1
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) into a token (str) using the vocab."""
        if index < self.sp_model.get_piece_size():
            token = self.sp_model.IdToPiece(index)
        else:
            token = f"<extra_id_{self.vocab_size - 1 - index}>"
        return token
    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings) into a single string."""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
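# Usage sketch (illustrative; downloads the vocabulary listed in
# PRETRAINED_VOCAB_FILES_MAP above, so it needs network access):
#
#   tokenizer = TaTokenizer.from_pretrained("t5-small")
#   ids = tokenizer("translate English to German: Hello.").input_ids
#   text = tokenizer.decode(ids)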
| 135
| 0
|
"""simple docstring"""
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def log_results(result, args):
    """Compute WER/CER on the mapped dataset and write the results to text files."""
    log_outputs = args.log_outputs
    dataset_id = "_".join(args.dataset.split("/") + [args.config, args.split])

    # load metric
    wer = load_metric("wer")
    cer = load_metric("cer")

    # compute metrics
    wer_result = wer.compute(references=result["target"], predictions=result["prediction"])
    cer_result = cer.compute(references=result["target"], predictions=result["prediction"])

    # print & log results
    result_str = f"WER: {wer_result}\nCER: {cer_result}"
    print(result_str)

    with open(f"{dataset_id}_eval_results.txt", "w") as f:
        f.write(result_str)

    # log all results in text file. Possibly interesting for analysis
    if log_outputs is not None:
        pred_file = f"log_{dataset_id}_predictions.txt"
        target_file = f"log_{dataset_id}_targets.txt"

        with open(pred_file, "w") as p, open(target_file, "w") as t:
            # mapping function to write output
            def write_to_file(batch, i):
                p.write(f"{i}" + "\n")
                p.write(batch["prediction"] + "\n")
                t.write(f"{i}" + "\n")
                t.write(batch["target"] + "\n")

            result.map(write_to_file, with_indices=True)
def normalize_text(text):
    """Normalize a target transcription (lowercase, strip punctuation and extra whitespace)."""
    chars_to_ignore_regex = "[,?.!\-\;\:\"“%‘”�—’…–]"  # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
    text = re.sub(chars_to_ignore_regex, "", text.lower())

    # In addition, we can normalize the target text, e.g. removing new lines characters etc...
    # note that order is important here!
    token_sequences_to_ignore = ["\n\n", "\n", "   ", "  "]

    for t in token_sequences_to_ignore:
        text = " ".join(text.split(t))

    return text
def main(args):
    # load dataset
    dataset = load_dataset(args.dataset, args.config, split=args.split, use_auth_token=True)

    # for testing: only process the first two examples as a test
    # dataset = dataset.select(range(10))

    # load processor
    feature_extractor = AutoFeatureExtractor.from_pretrained(args.model_id)
    sampling_rate = feature_extractor.sampling_rate

    # resample audio
    dataset = dataset.cast_column("audio", Audio(sampling_rate=sampling_rate))

    # load eval pipeline
    if args.device is None:
        args.device = 0 if torch.cuda.is_available() else -1
    asr = pipeline("automatic-speech-recognition", model=args.model_id, device=args.device)

    # map function to decode audio
    def map_to_pred(batch):
        prediction = asr(
            batch["audio"]["array"], chunk_length_s=args.chunk_length_s, stride_length_s=args.stride_length_s
        )

        batch["prediction"] = prediction["text"]
        batch["target"] = normalize_text(batch["sentence"])
        return batch

    # run inference on all examples
    result = dataset.map(map_to_pred, remove_columns=dataset.column_names)

    # compute and log_results
    # do not change function below
    log_results(result, args)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--model_id''', type=str, required=True, help='''Model identifier. Should be loadable with 🤗 Transformers'''
)
parser.add_argument(
'''--dataset''',
type=str,
required=True,
help='''Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets''',
)
parser.add_argument(
'''--config''', type=str, required=True, help='''Config of the dataset. *E.g.* `\'en\'` for Common Voice'''
)
parser.add_argument('''--split''', type=str, required=True, help='''Split of the dataset. *E.g.* `\'test\'`''')
parser.add_argument(
'''--chunk_length_s''', type=float, default=None, help='''Chunk length in seconds. Defaults to 5 seconds.'''
)
parser.add_argument(
'''--stride_length_s''', type=float, default=None, help='''Stride of the audio chunks. Defaults to 1 second.'''
)
parser.add_argument(
'''--log_outputs''', action='''store_true''', help='''If defined, write outputs to log file for analysis.'''
)
parser.add_argument(
'''--device''',
type=int,
default=None,
help='''The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.''',
)
    args = parser.parse_args()
main(args)
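# Example invocation (a sketch; the model and dataset identifiers are placeholders):
#   python eval.py --model_id <hub-model-id> --dataset <hub-dataset-id> \
#       --config en --split test --log_outputs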
| 64
|
from manim import *
class A_ ( __lowerCamelCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = Rectangle(height=0.5 , width=0.5 )
lowercase = Rectangle(height=0.25 , width=0.25 )
lowercase = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
lowercase = [mem.copy() for i in range(6 )]
lowercase = [mem.copy() for i in range(6 )]
lowercase = VGroup(*snake_case ).arrange(snake_case , buff=0 )
lowercase = VGroup(*snake_case ).arrange(snake_case , buff=0 )
lowercase = VGroup(snake_case , snake_case ).arrange(snake_case , buff=0 )
lowercase = Text('CPU' , font_size=24 )
lowercase = Group(snake_case , snake_case ).arrange(snake_case , buff=0.5 , aligned_edge=snake_case )
cpu.move_to([-2.5, -0.5, 0] )
self.add(snake_case )
lowercase = [mem.copy() for i in range(4 )]
lowercase = VGroup(*snake_case ).arrange(snake_case , buff=0 )
lowercase = Text('GPU' , font_size=24 )
lowercase = Group(snake_case , snake_case ).arrange(snake_case , buff=0.5 , aligned_edge=snake_case )
gpu.move_to([-1, -1, 0] )
self.add(snake_case )
lowercase = [mem.copy() for i in range(6 )]
lowercase = VGroup(*snake_case ).arrange(snake_case , buff=0 )
lowercase = Text('Model' , font_size=24 )
lowercase = Group(snake_case , snake_case ).arrange(snake_case , buff=0.5 , aligned_edge=snake_case )
model.move_to([3, -1.0, 0] )
self.add(snake_case )
lowercase = []
lowercase = []
lowercase = []
for i, rect in enumerate(snake_case ):
rect.set_stroke(snake_case )
lowercase = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(snake_case , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=snake_case )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(model_cpu_arr[0] , direction=snake_case , buff=0.0 )
else:
cpu_target.next_to(model_cpu_arr[i - 1] , direction=snake_case , buff=0.0 )
self.add(snake_case )
model_cpu_arr.append(snake_case )
self.add(*snake_case , *snake_case , *snake_case )
lowercase = [mem.copy() for i in range(6 )]
lowercase = VGroup(*snake_case ).arrange(snake_case , buff=0 )
lowercase = Text('Loaded Checkpoint' , font_size=24 )
lowercase = Group(snake_case , snake_case ).arrange(snake_case , buff=0.5 , aligned_edge=snake_case )
checkpoint.move_to([3, 0.5, 0] )
self.add(snake_case )
lowercase = []
lowercase = []
for i, rect in enumerate(snake_case ):
lowercase = fill.copy().set_fill(snake_case , opacity=0.7 )
target.move_to(snake_case )
ckpt_arr.append(snake_case )
lowercase = target.copy()
if i < 5:
cpu_target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.move_to(cpu_right_col_base[i - 5] )
ckpt_cpu_arr.append(snake_case )
self.add(*snake_case , *snake_case )
lowercase = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
lowercase = MarkupText(
F'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(snake_case , snake_case )
lowercase = MarkupText(
F'''<span fgcolor=\'{BLUE}\'>●</span> Checkpoint''' , font_size=18 , )
blue_text.next_to(snake_case , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(snake_case )
lowercase = MarkupText(
F'''Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device.''' , font_size=24 , )
step_a.move_to([2, 2, 0] )
lowercase = [meta_mem.copy() for i in range(6 )]
lowercase = [meta_mem.copy() for i in range(6 )]
lowercase = VGroup(*snake_case ).arrange(snake_case , buff=0 )
lowercase = VGroup(*snake_case ).arrange(snake_case , buff=0 )
lowercase = VGroup(snake_case , snake_case ).arrange(snake_case , buff=0 )
lowercase = Text('Disk' , font_size=24 )
lowercase = Group(snake_case , snake_case ).arrange(snake_case , buff=0.5 , aligned_edge=snake_case )
disk.move_to([-4.0, -1.25, 0] )
self.play(Write(snake_case , run_time=3 ) , Write(snake_case , run_time=1 ) , Create(snake_case , run_time=1 ) )
lowercase = []
for i, rect in enumerate(snake_case ):
lowercase = rect.copy()
target.generate_target()
target.target.move_to(disk_left_col_base[i] ).scale(0.5 )
animations.append(MoveToTarget(snake_case , run_time=1.5 ) )
self.play(*snake_case )
self.play(FadeOut(snake_case ) )
lowercase = MarkupText(F'''Then, the checkpoint is removed from memory\nthrough garbage collection.''' , font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(snake_case , run_time=3 ) )
self.play(
FadeOut(snake_case , snake_case , *snake_case , *snake_case ) , )
self.wait()
| 195
| 0
|
'''simple docstring'''
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class CTRLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CTRLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ["adapt", "re@@", "a@@", "apt", "c@@", "t", "<unk>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "a p", "ap t</w>", "r e", "a d", "ad apt</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CTRLTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "adapt react readapt apt"
        output_text = "adapt react readapt apt"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = CTRLTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "adapt react readapt apt"
        bpe_tokens = "adapt re@@ a@@ c@@ t re@@ adapt apt".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]

        input_bpe_tokens = [0, 1, 2, 4, 5, 1, 0, 3, 6]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
| 242
|
'''simple docstring'''
from string import ascii_uppercase
ALPHABET_VALUES = {str(ord(c) - 55): c for c in ascii_uppercase}
def decimal_to_any(num: int, base: int) -> str:
    """Convert a positive integer to another base (2..36) as a string."""
    if isinstance(num, float):
        raise TypeError("int() can't convert non-string with explicit base")
    if num < 0:
        raise ValueError("parameter must be positive int")
    if isinstance(base, str):
        raise TypeError("'str' object cannot be interpreted as an integer")
    if isinstance(base, float):
        raise TypeError("'float' object cannot be interpreted as an integer")
    if base in (0, 1):
        raise ValueError("base must be >= 2")
    if base > 36:
        raise ValueError("base must be <= 36")
    new_value = ""
    mod = 0
    div = 0
    while div != 1:
        div, mod = divmod(num, base)
        if base >= 11 and 9 < mod < 36:
            # digits above 9 are mapped to letters via ALPHABET_VALUES
            actual_value = ALPHABET_VALUES[str(mod)]
        else:
            actual_value = str(mod)
        new_value += actual_value
        div = num // base
        num = div
        if div == 0:
            return str(new_value[::-1])
        elif div == 1:
            new_value += str(div)
            return str(new_value[::-1])

    return new_value[::-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
for base in range(2, 37):
for num in range(1000):
assert int(decimal_to_any(num, base), base) == num, (
num,
base,
decimal_to_any(num, base),
int(decimal_to_any(num, base), base),
)
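# Worked example (hand-checked): decimal_to_any(255, 16) returns "FF", since
# divmod(255, 16) == (15, 15) and ALPHABET_VALUES["15"] == "F".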
| 242
| 1
|
import argparse
import re
from flax.traverse_util import flatten_dict, unflatten_dict
from tax import checkpoints
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging
logging.set_verbosity_info()
# should not include what is already done by the `from_pt` argument
MOE_LAYER_NAME_MAPPING = {
'''/attention/''': '''/0/SelfAttention/''',
'''/self_attention/''': '''/0/SelfAttention/''',
'''/encoder_decoder_attention/''': '''/1/EncDecAttention/''',
'''value''': '''v''',
'''query''': '''q''',
'''key''': '''k''',
'''out''': '''o''',
'''pre_self_attention_layer_norm''': '''0/layer_norm''',
'''pre_cross_attention_layer_norm''': '''1/layer_norm''',
'''pre_attention_layer_norm''': '''0/layer_norm''', # previously 1, but seems wrong
'''token_embedder''': '''shared''',
'''encoder_norm''': '''final_layer_norm''',
'''decoder_norm''': '''final_layer_norm''',
'''relpos_bias/rel_embedding''': '''block/0/layer/0/SelfAttention/relative_attention_bias/weight''',
'''router/router_weights/w/''': '''router/classifier/''',
'''roer/roer_weights/w/''': '''router/classifier/''',
'''logits_dense''': '''lm_head''',
}
def rename_keys(s_dict):
    # 1. in HF T5, we have block.{x}.layer.{y}. which corresponds to layer.{x} in
    # the original model
    keys = list(s_dict.keys())
    for key in keys:
        layer_to_block_of_layer = r".*/layers_(\d+)"
        new_key = key
        if re.match(layer_to_block_of_layer, key):
            new_key = re.sub(r"layers_(\d+)", r"block/\1/layer", new_key)

        layer_to_block_of_layer = r"(encoder|decoder)\/"
        if re.match(layer_to_block_of_layer, key):
            groups = re.match(layer_to_block_of_layer, new_key).groups()
            if groups[0] == "encoder":
                new_key = re.sub(r"/mlp/", r"/1/mlp/", new_key)
                new_key = re.sub(r"/pre_mlp_layer_norm/", r"/1/layer_norm/", new_key)
            elif groups[0] == "decoder":
                new_key = re.sub(r"/mlp/", r"/2/mlp/", new_key)
                new_key = re.sub(r"/pre_mlp_layer_norm/", r"/2/layer_norm/", new_key)

        # 2. Convert other classic mappings
        for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
            if old_key in new_key:
                new_key = new_key.replace(old_key, temp_key)

        print(f"{key} -> {new_key}")
        s_dict[new_key] = s_dict.pop(key)

    if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        s_dict["encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"] = s_dict[
            "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
        ].T
    if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        s_dict["decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"] = s_dict[
            "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
        ].T

    # 3. Take extra care of the EXPERTS layer
    for key in list(s_dict.keys()):
        if "expert" in key:
            num_experts = s_dict[key].shape[0]
            expert_weights = s_dict[key]
            for idx in range(num_experts):
                s_dict[key.replace("expert/", f"experts.expert_{idx}/")] = expert_weights[idx]
                print(f"{key} -> {key.replace('expert/', f'experts.expert_{idx}/')}")
            s_dict.pop(key)
    return s_dict
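

def _demo_rename_keys():
    # Hedged sanity sketch (uncalled; the key below is hypothetical, not taken from a
    # real checkpoint): illustrates the intended mapping for one attention weight name.
    renamed = rename_keys({"encoder/layers_0/attention/query/kernel": None})
    assert "encoder/block/0/layer/0/SelfAttention/q/kernel" in renamed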
GIN_TO_CONFIG_MAPPING = {
'''NUM_ENCODER_LAYERS''': '''num_layers''',
'''NUM_DECODER_LAYERS''': '''num_decoder_layers''',
'''NUM_HEADS''': '''num_heads''',
'''HEAD_DIM''': '''d_kv''',
'''EMBED_DIM''': '''d_model''',
'''MLP_DIM''': '''d_ff''',
'''NUM_SELECTED_EXPERTS''': '''num_selected_experts''',
'''NUM_ENCODER_SPARSE_LAYERS''': '''num_sparse_encoder_layers''',
'''NUM_DECODER_SPARSE_LAYERS''': '''num_sparse_decoder_layers''',
'''dense.MlpBlock.activations''': '''feed_forward_proj''',
}
def convert_gin_to_config(gin_file, num_experts):
    # Convert a google style config to the hugging face format
    import regex as re

    with open(gin_file, "r") as f:
        raw_gin = f.read()

    regex_match = re.findall(r"(.*) = ([0-9.]*)", raw_gin)
    args = {}
    for param, value in regex_match:
        if param in GIN_TO_CONFIG_MAPPING and value != "":
            args[GIN_TO_CONFIG_MAPPING[param]] = float(value) if "." in value else int(value)

    activation = re.findall(r"(.*activations) = \(\'(.*)\',\)", raw_gin)[0]
    args[GIN_TO_CONFIG_MAPPING[activation[0]]] = str(activation[1])

    args["num_experts"] = num_experts
    config = SwitchTransformersConfig(**args)
    return config
def convert_flax_checkpoint_to_pytorch(flax_checkpoint_path, config_file, gin_file=None, pytorch_dump_path="./", num_experts=8):
    # Initialise PyTorch model
    print(f"Loading flax weights from : {flax_checkpoint_path}")
    flax_params = checkpoints.load_t5x_checkpoint(flax_checkpoint_path)

    if gin_file is not None:
        config = convert_gin_to_config(gin_file, num_experts)
    else:
        config = SwitchTransformersConfig.from_pretrained(config_file)

    pt_model = SwitchTransformersForConditionalGeneration(config)

    flax_params = flax_params["target"]
    flax_params = flatten_dict(flax_params, sep="/")
    flax_params = rename_keys(flax_params)
    flax_params = unflatten_dict(flax_params, sep="/")

    # Load the flax params in the PT model
    load_flax_weights_in_pytorch_model(pt_model, flax_params)

    print(f"Save PyTorch model to {pytorch_dump_path}")
    pt_model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--switch_t5x_checkpoint_path''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the'''
''' model architecture. If not provided, a `gin_file` has to be provided.'''
),
)
parser.add_argument(
'''--gin_file''',
default=None,
type=str,
required=False,
help='''Path to the gin config file. If not provided, a `config_file` has to be passed ''',
)
parser.add_argument(
'''--config_name''', default=None, type=str, required=False, help='''Config name of SwitchTransformers model.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output pytorch model.'''
)
parser.add_argument('''--num_experts''', default=8, type=int, required=False, help='''Number of experts''')
    args = parser.parse_args()
    convert_flax_checkpoint_to_pytorch(
        args.switch_t5x_checkpoint_path,
args.config_name,
args.gin_file,
args.pytorch_dump_folder_path,
args.num_experts,
)
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model'}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/spiece.model''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/spiece.model''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/spiece.model''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/spiece.model''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model''',
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'albert-base-v1': 512,
    'albert-large-v1': 512,
    'albert-xlarge-v1': 512,
    'albert-xxlarge-v1': 512,
    'albert-base-v2': 512,
    'albert-large-v2': 512,
    'albert-xlarge-v2': 512,
    'albert-xxlarge-v2': 512,
}
SPIECE_UNDERLINE = '▁'
class AlbertTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(self, vocab_file, do_lower_case=True, remove_space=True, keep_accents=False, bos_token="[CLS]", eos_token="[SEP]", unk_token="<unk>", sep_token="[SEP]", pad_token="<pad>", cls_token="[CLS]", mask_token="[MASK]", sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs, ) -> None:
        # Mask token behave like a normal word, i.e. include the space before it and
        # is included in the raw text, there should be a match in a non-normalized sentence.
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs, )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self) -> int:
        return len(self.sp_model)

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def preprocess_text(self, inputs):
        if self.remove_space:
            outputs = " ".join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace("``", '"').replace("''", '"')

        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD", outputs)
            outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()

        return outputs

    def _tokenize(self, text: str) -> List[str]:
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)

        return new_pieces

    def _convert_token_to_id(self, token):
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
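

# Hedged usage sketch (comments only, since it needs a real SentencePiece model file;
# the path below is hypothetical):
#
#   tokenizer = AlbertTokenizer("spiece.model")
#   tokens = tokenizer.tokenize("this is a test")
#   ids = tokenizer.convert_tokens_to_ids(tokens)
#   text = tokenizer.convert_tokens_to_string(tokens)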
"""simple docstring"""
from __future__ import annotations
def merge(input_list: list, low: int, mid: int, high: int) -> list:
    result = []
    left, right = input_list[low:mid], input_list[mid : high + 1]
    while left and right:
        result.append((left if left[0] <= right[0] else right).pop(0))
    input_list[low : high + 1] = result + left + right
    return input_list


def iter_merge_sort(input_list: list) -> list:
    if len(input_list) <= 1:
        return input_list
    input_list = list(input_list)

    # iteration for two-way merging
    p = 2
    while p <= len(input_list):
        # getting low, high and middle value for merge-sort of single list
        for i in range(0, len(input_list), p):
            low = i
            high = i + p - 1
            mid = (low + high + 1) // 2
            input_list = merge(input_list, low, mid, high)
        # final merge of last two parts
        if p * 2 >= len(input_list):
            mid = i
            input_list = merge(input_list, 0, mid, len(input_list) - 1)
            break
        p *= 2

    return input_list
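

def _demo_iter_merge_sort():
    # Hedged usage sketch (uncalled): bottom-up merge sort on a small fixed list.
    assert iter_merge_sort([5, 9, 8, 7, 1, 2, 7]) == [1, 2, 5, 7, 7, 8, 9]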
if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    if user_input == "":
        unsorted = []
    else:
        unsorted = [int(item.strip()) for item in user_input.split(',')]
print(iter_merge_sort(unsorted))
"""simple docstring"""
import math
import sys
def minimum_squares_to_represent_a_number(number: int) -> int:
    if number != int(number):
        raise ValueError('the value of input must be a natural number')
    if number < 0:
        raise ValueError('the value of input must not be a negative number')
    if number == 0:
        return 1
    answers = [-1] * (number + 1)
    answers[0] = 0
    for i in range(1, number + 1):
        answer = sys.maxsize
        root = int(math.sqrt(i))
        for j in range(1, root + 1):
            current_answer = 1 + answers[i - (j**2)]
            answer = min(current_answer, answer)
        answers[i] = answer
    return answers[number]
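

def _demo_minimum_squares():
    # Hedged usage sketch (uncalled): 12 = 4 + 4 + 4, so three squares suffice.
    assert minimum_squares_to_represent_a_number(12) == 3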
if __name__ == "__main__":
import doctest
doctest.testmod()
'''simple docstring'''
from __future__ import annotations
graph = {
    "A": ["B", "C", "E"],
    "B": ["A", "D", "E"],
    "C": ["A", "F", "G"],
    "D": ["B"],
    "E": ["A", "B", "D"],
    "F": ["C"],
    "G": ["C"],
}


class Graph:
    def __init__(self, graph: dict[str, list[str]], source_vertex: str) -> None:
        self.graph = graph
        # mapping node to its parent in resulting breadth first tree
        self.parent: dict[str, str | None] = {}
        self.source_vertex = source_vertex

    def breath_first_search(self) -> None:
        visited = {self.source_vertex}
        self.parent[self.source_vertex] = None
        queue = [self.source_vertex]  # first in first out queue
        while queue:
            vertex = queue.pop(0)
            for adjacent_vertex in self.graph[vertex]:
                if adjacent_vertex not in visited:
                    visited.add(adjacent_vertex)
                    self.parent[adjacent_vertex] = vertex
                    queue.append(adjacent_vertex)

    def shortest_path(self, target_vertex: str) -> str:
        if target_vertex == self.source_vertex:
            return self.source_vertex
        target_vertex_parent = self.parent.get(target_vertex)
        if target_vertex_parent is None:
            msg = f"No path from vertex: {self.source_vertex} to vertex: {target_vertex}"
            raise ValueError(msg)
        return self.shortest_path(target_vertex_parent) + f"->{target_vertex}"


if __name__ == "__main__":
    g = Graph(graph, "G")
    g.breath_first_search()
    print(g.shortest_path("D"))
    print(g.shortest_path("G"))
    print(g.shortest_path("Foo"))
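
    # Hedged note on the demo above: with this sample graph the first two calls print
    # "G->C->A->B->D" and "G"; the final call raises ValueError since "Foo" is absent.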
'''simple docstring'''
from collections import OrderedDict
from typing import List, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""google/efficientnet-b7""": """https://huggingface.co/google/efficientnet-b7/resolve/main/config.json""",
}
class EfficientNetConfig(PretrainedConfig):
    model_type = 'efficientnet'

    def __init__(self, num_channels: int = 3, image_size: int = 600, width_coefficient: float = 2.0, depth_coefficient: float = 3.1, depth_divisor: int = 8, kernel_sizes: List[int] = [3, 3, 5, 3, 5, 5, 3], in_channels: List[int] = [32, 16, 24, 40, 80, 112, 192], out_channels: List[int] = [16, 24, 40, 80, 112, 192, 320], depthwise_padding: List[int] = [], strides: List[int] = [1, 2, 2, 2, 1, 2, 1], num_block_repeats: List[int] = [1, 2, 2, 3, 3, 4, 1], expand_ratios: List[int] = [1, 6, 6, 6, 6, 6, 6], squeeze_expansion_ratio: float = 0.25, hidden_act: str = "swish", hidden_dim: int = 2560, pooling_type: str = "mean", initializer_range: float = 0.02, batch_norm_eps: float = 0.001, batch_norm_momentum: float = 0.99, dropout_rate: float = 0.5, drop_connect_rate: float = 0.2, **kwargs, ):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.dropout_rate = dropout_rate
        self.drop_connect_rate = drop_connect_rate
        self.num_hidden_layers = sum(num_block_repeats) * 4


class EfficientNetOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse('1.11')

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ])

    @property
    def atol_for_validation(self) -> float:
        return 1e-5
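

def _demo_efficientnet_config():
    # Hedged usage sketch (uncalled): the defaults above describe a B7-sized model; the
    # derived layer count is sum(num_block_repeats) * 4.
    config = EfficientNetConfig()
    assert config.image_size == 600
    assert config.num_hidden_layers == sum([1, 2, 2, 3, 3, 4, 1]) * 4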
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"microsoft/swinv2-tiny-patch4-window8-256": (
"https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json"
),
}
class Swinv2Config(PretrainedConfig):
    model_type = 'swinv2'

    attribute_map = {
        'num_attention_heads': 'num_heads',
        'num_hidden_layers': 'num_layers',
    }

    def __init__(self, image_size=224, patch_size=4, num_channels=3, embed_dim=96, depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 24], window_size=7, mlp_ratio=4.0, qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act="gelu", use_absolute_embeddings=False, initializer_range=0.02, layer_norm_eps=1e-5, encoder_stride=32, **kwargs, ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.pretrained_window_sizes = (0, 0, 0, 0)
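

def _demo_swinv2_config():
    # Hedged usage sketch (uncalled): hidden_size is derived as
    # embed_dim * 2 ** (len(depths) - 1), i.e. 96 * 8 = 768 for the defaults above.
    config = Swinv2Config()
    assert config.hidden_size == 768
    assert config.num_layers == 4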
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    EulerAncestralDiscreteScheduler,
    LMSDiscreteScheduler,
    PNDMScheduler,
    StableDiffusionInstructPix2PixPipeline,
    UNet2DConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.utils import floats_tensor, load_image, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionInstructPix2PixPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionInstructPix2PixPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'height', 'width', 'cross_attention_kwargs'}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=8, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, )
        scheduler = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB")
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "image_guidance_scale": 1,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_pix2pix_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInstructPix2PixPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.7526, 0.3750, 0.4547, 0.6117, 0.5866, 0.5016, 0.4327, 0.5642, 0.4815])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_negative_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInstructPix2PixPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        negative_prompt = "french fries"
        output = sd_pipe(**inputs, negative_prompt=negative_prompt)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.7511, 0.3642, 0.4553, 0.6236, 0.5797, 0.5013, 0.4343, 0.5611, 0.4831])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_multiple_init_images(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInstructPix2PixPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["prompt"] = [inputs["prompt"]] * 2

        image = np.array(inputs["image"]).astype(np.float32) / 255.0
        image = torch.from_numpy(image).unsqueeze(0).to(device)
        image = image / 2 + 0.5
        image = image.permute(0, 3, 1, 2)
        inputs["image"] = image.repeat(2, 1, 1, 1)

        image = sd_pipe(**inputs).images
        image_slice = image[-1, -3:, -3:, -1]

        assert image.shape == (2, 32, 32, 3)
        expected_slice = np.array([0.5812, 0.5748, 0.5222, 0.5908, 0.5695, 0.7174, 0.6804, 0.5523, 0.5579])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_euler(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = EulerAncestralDiscreteScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear")
        sd_pipe = StableDiffusionInstructPix2PixPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        slice = [round(x, 4) for x in image_slice.flatten().tolist()]
        print(",".join([str(x) for x in slice]))

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.7417, 0.3842, 0.4732, 0.5776, 0.5891, 0.5139, 0.4052, 0.5673, 0.4986])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)

    def test_latents_input(self):
        components = self.get_dummy_components()
        pipe = StableDiffusionInstructPix2PixPipeline(**components)
        pipe.image_processor = VaeImageProcessor(do_resize=False, do_normalize=False)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        out = pipe(**self.get_dummy_inputs_by_type(torch_device, input_image_type="pt"))[0]

        vae = components["vae"]
        inputs = self.get_dummy_inputs_by_type(torch_device, input_image_type="pt")

        for image_param in self.image_latents_params:
            if image_param in inputs.keys():
                inputs[image_param] = vae.encode(inputs[image_param]).latent_dist.mode()

        out_latents_inputs = pipe(**inputs)[0]

        max_diff = np.abs(out - out_latents_inputs).max()
        self.assertLess(max_diff, 1e-4, "passing latents as image input generate different result from passing image")


@slow
@require_torch_gpu
class StableDiffusionInstructPix2PixPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, seed=0):
        generator = torch.manual_seed(seed)
        image = load_image(
            "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg")
        inputs = {
            "prompt": "turn him into a cyborg",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "image_guidance_scale": 1.0,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_pix2pix_pipeline_default(self):
        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.5902, 0.6015, 0.6027, 0.5983, 0.6092, 0.6061, 0.5765, 0.5785, 0.5555])
        assert np.abs(expected_slice - image_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_k_lms(self):
        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None)
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.6578, 0.6817, 0.6972, 0.6761, 0.6856, 0.6916, 0.6428, 0.6516, 0.6301])
        assert np.abs(expected_slice - image_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_ddim(self):
        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None)
        pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3828, 0.3834, 0.3818, 0.3792, 0.3865, 0.3752, 0.3792, 0.3847, 0.3753])
        assert np.abs(expected_slice - image_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_intermediate_state(self):
        number_of_steps = 0

        def callback_fn(step: int, timestep: int, latents: torch.FloatTensor) -> None:
            callback_fn.has_been_called = True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 1:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array([-0.2463, -0.4644, -0.9756, 1.5176, 1.4414, 0.7866, 0.9897, 0.8521, 0.7983])
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2
            elif step == 2:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array([-0.2644, -0.4626, -0.9653, 1.5176, 1.4551, 0.7686, 0.9805, 0.8452, 0.8115])
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2

        callback_fn.has_been_called = False

        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None, torch_dtype=torch.float16)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        pipe(**inputs, callback=callback_fn, callback_steps=1)
        assert callback_fn.has_been_called
        assert number_of_steps == 3

    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None, torch_dtype=torch.float16)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()

        inputs = self.get_inputs()
        _ = pipe(**inputs)

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.2 GB is allocated
        assert mem_bytes < 2.2 * 10**9

    def test_stable_diffusion_pix2pix_pipeline_multiple_of_8(self):
        inputs = self.get_inputs()
        # resize to resolution that is divisible by 8 but not 16 or 32
        inputs["image"] = inputs["image"].resize((504, 504))

        model_id = "timbrooks/instruct-pix2pix"
        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            model_id, safety_checker=None, )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        output = pipe(**inputs)
        image = output.images[0]

        image_slice = image[255:258, 383:386, -1]

        assert image.shape == (504, 504, 3)
        expected_slice = np.array([0.2726, 0.2529, 0.2664, 0.2655, 0.2641, 0.2642, 0.2591, 0.2649, 0.2590])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-3
"""simple docstring"""
import argparse
import torch
from transformers import YosoConfig, YosoForMaskedLM
def rename_key(orig_key):
    if "model" in orig_key:
        orig_key = orig_key.replace("model.", "")
    if "norm1" in orig_key:
        orig_key = orig_key.replace("norm1", "attention.output.LayerNorm")
    if "norm2" in orig_key:
        orig_key = orig_key.replace("norm2", "output.LayerNorm")
    if "norm" in orig_key:
        orig_key = orig_key.replace("norm", "LayerNorm")
    if "transformer" in orig_key:
        layer_num = orig_key.split(".")[0].split("_")[-1]
        orig_key = orig_key.replace(f"transformer_{layer_num}", f"encoder.layer.{layer_num}")
    if "mha.attn" in orig_key:
        orig_key = orig_key.replace("mha.attn", "attention.self")
    if "mha" in orig_key:
        orig_key = orig_key.replace("mha", "attention")
    if "W_q" in orig_key:
        orig_key = orig_key.replace("W_q", "self.query")
    if "W_k" in orig_key:
        orig_key = orig_key.replace("W_k", "self.key")
    if "W_v" in orig_key:
        orig_key = orig_key.replace("W_v", "self.value")
    if "ff1" in orig_key:
        orig_key = orig_key.replace("ff1", "intermediate.dense")
    if "ff2" in orig_key:
        orig_key = orig_key.replace("ff2", "output.dense")
    if "ff" in orig_key:
        orig_key = orig_key.replace("ff", "output.dense")
    if "mlm_class" in orig_key:
        orig_key = orig_key.replace("mlm.mlm_class", "cls.predictions.decoder")
    if "mlm" in orig_key:
        orig_key = orig_key.replace("mlm", "cls.predictions.transform")
    if "cls" not in orig_key:
        orig_key = "yoso." + orig_key

    return orig_key
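

def _demo_rename_key():
    # Hedged sanity sketch (uncalled) on a hypothetical checkpoint key, not taken from
    # a real YOSO dump, showing the intended renaming chain.
    assert (
        rename_key("model.transformer_0.mha.W_q.weight")
        == "yoso.encoder.layer.0.attention.self.query.weight"
    )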
def convert_checkpoint_helper(max_position_embeddings, orig_state_dict):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if ("pooler" in key) or ("sen_class" in key):
            continue
        else:
            orig_state_dict[rename_key(key)] = val

    orig_state_dict["cls.predictions.bias"] = orig_state_dict["cls.predictions.decoder.bias"]
    orig_state_dict["yoso.embeddings.position_ids"] = torch.arange(max_position_embeddings).expand((1, -1)) + 2

    return orig_state_dict


def convert_yoso_checkpoint(checkpoint_path, yoso_config_file, pytorch_dump_path):
    orig_state_dict = torch.load(checkpoint_path, map_location="cpu")["model_state_dict"]
    config = YosoConfig.from_json_file(yoso_config_file)
    model = YosoForMaskedLM(config)

    new_state_dict = convert_checkpoint_helper(config.max_position_embeddings, orig_state_dict)

    print(model.load_state_dict(new_state_dict))
    model.eval()
    model.save_pretrained(pytorch_dump_path)

    print(f"Checkpoint successfully converted. Model saved at {pytorch_dump_path}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--pytorch_model_path""", default=None, type=str, required=True, help="""Path to YOSO pytorch checkpoint."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help="""The json file for YOSO model config.""",
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
    args = parser.parse_args()
convert_yoso_checkpoint(args.pytorch_model_path, args.config_file, args.pytorch_dump_path)
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)


def make_batched(videos) -> List[List[ImageInput]]:
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        return videos

    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        return [videos]

    elif is_valid_image(videos):
        return [[videos]]

    raise ValueError(f"Could not make batched video from {videos}")
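

def _demo_make_batched():
    # Hedged sanity sketch (uncalled): a single frame becomes a batch of one video
    # containing one frame.
    frame = np.zeros((16, 16, 3), dtype=np.uint8)
    batched = make_batched(frame)
    assert len(batched) == 1 and batched[0][0] is frame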
class VivitImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BILINEAR, do_center_crop: bool = True, crop_size: Dict[str, int] = None, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, offset: bool = True, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, **kwargs, ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.offset = offset
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BILINEAR, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" in size:
            output_size = get_resize_output_image_size(image, size["shortest_edge"], default_to_square=False)
        elif "height" in size and "width" in size:
            output_size = (size["height"], size["width"])
        else:
            raise ValueError(f"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}")
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size must have 'height' and 'width' as keys. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: Union[int, float], offset: bool = True, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ):
        image = image.astype(np.float32)
        if offset:
            image = image - (scale / 2)
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def _preprocess_image(self, image: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_center_crop: bool = None, crop_size: Dict[str, int] = None, do_rescale: bool = None, rescale_factor: float = None, offset: bool = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, data_format: Optional[ChannelDimension] = ChannelDimension.FIRST, ) -> np.ndarray:
        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        if offset and not do_rescale:
            raise ValueError("For offset, do_rescale must also be set to True.")

        # All transformations expect numpy arrays.
        image = to_numpy_array(image)

        if do_resize:
            image = self.resize(image=image, size=size, resample=resample)

        if do_center_crop:
            image = self.center_crop(image, size=crop_size)

        if do_rescale:
            image = self.rescale(image=image, scale=rescale_factor, offset=offset)

        if do_normalize:
            image = self.normalize(image=image, mean=image_mean, std=image_std)

        image = to_channel_dimension_format(image, data_format)
        return image

    def preprocess(self, videos: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_center_crop: bool = None, crop_size: Dict[str, int] = None, do_rescale: bool = None, rescale_factor: float = None, offset: bool = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs, ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        offset = offset if offset is not None else self.offset
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        if not valid_images(videos):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray.")

        videos = make_batched(videos)

        videos = [
            [
                self._preprocess_image(
                    image=img, do_resize=do_resize, size=size, resample=resample, do_center_crop=do_center_crop, crop_size=crop_size, do_rescale=do_rescale, rescale_factor=rescale_factor, offset=offset, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, data_format=data_format, )
                for img in video
            ]
            for video in videos
        ]

        data = {"pixel_values": videos}
        return BatchFeature(data=data, tensor_type=return_tensors)
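

def _demo_video_preprocessing():
    # Hedged usage sketch (uncalled; class name as reconstructed above): preprocessing
    # a tiny synthetic two-frame clip into channels-first 3x32x32 arrays.
    processor = VivitImageProcessor(size={"shortest_edge": 32}, crop_size={"height": 32, "width": 32})
    clip = [np.random.randint(0, 256, (48, 48, 3), dtype=np.uint8) for _ in range(2)]
    batch = processor(clip)
    assert np.asarray(batch["pixel_values"][0][0]).shape == (3, 32, 32)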
"""simple docstring"""
import unittest
import numpy as np
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class DPTImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], ):
        size = size if size is not None else {'height': 18, 'width': 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
        }


@require_torch
@require_vision
class DPTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DPTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DPTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, 'image_mean'))
        self.assertTrue(hasattr(image_processing, 'image_std'))
        self.assertTrue(hasattr(image_processing, 'do_normalize'))
        self.assertTrue(hasattr(image_processing, 'do_resize'))
        self.assertTrue(hasattr(image_processing, 'size'))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {'height': 18, 'width': 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {'height': 42, 'width': 42})

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ),
        )
"""simple docstring"""
# Note: if you intend to run this script make sure you look under scripts/fsmt/
# to locate the appropriate script to do the work correctly. There is a set of scripts to:
# - download and prepare data and run the conversion script
# - perform eval to get the best hparam into the config
# - generate model_cards - useful if you have multiple models from the same paper
import argparse
import json
import os
import re
from collections import OrderedDict
from os.path import basename, dirname
import fairseq
import torch
from fairseq import hub_utils
from fairseq.data.dictionary import Dictionary
from transformers import FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
json_indent = 2
# based on the results of a search on a range of `num_beams`, `length_penalty` and `early_stopping`
# values against wmt19 test data to obtain the best BLEU scores, we will use the following defaults:
#
# * `num_beams`: 5 (higher scores better, but requires more memory/is slower, can be adjusted by users)
# * `early_stopping`: `False` consistently scored better
# * `length_penalty` varied, so will assign the best one depending on the model
best_score_hparams = {
# fairseq:
"wmt19-ru-en": {"length_penalty": 1.1},
"wmt19-en-ru": {"length_penalty": 1.15},
"wmt19-en-de": {"length_penalty": 1.0},
"wmt19-de-en": {"length_penalty": 1.1},
# allenai:
"wmt16-en-de-dist-12-1": {"length_penalty": 0.6},
"wmt16-en-de-dist-6-1": {"length_penalty": 0.6},
"wmt16-en-de-12-1": {"length_penalty": 0.8},
"wmt19-de-en-6-6-base": {"length_penalty": 0.6},
"wmt19-de-en-6-6-big": {"length_penalty": 0.6},
}
# this remaps the different models to their organization names
org_names = {}
for m in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    org_names[m] = "facebook"
for m in [
"wmt16-en-de-dist-12-1",
"wmt16-en-de-dist-6-1",
"wmt16-en-de-12-1",
"wmt19-de-en-6-6-base",
"wmt19-de-en-6-6-big",
]:
    org_names[m] = "allenai"
def rewrite_dict_keys(d):
    # (1) remove word breaking symbol, (2) add word ending symbol where the word is not broken up,
    # e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
    d2 = dict((re.sub(r"@@$", "", k), v) if k.endswith("@@") else (re.sub(r"$", "</w>", k), v) for k, v in d.items())
    keep_keys = "<s> <pad> </s> <unk>".split()
    # restore the special tokens
    for k in keep_keys:
        del d2[f"{k}</w>"]
        d2[k] = d[k]  # restore
    return d2
def convert_fsmt_checkpoint_to_pytorch(fsmt_checkpoint_path, pytorch_dump_folder_path):
    # prep
    assert os.path.exists(fsmt_checkpoint_path)
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    print(f"Writing results to {pytorch_dump_folder_path}")

    # handle various types of models
    checkpoint_file = basename(fsmt_checkpoint_path)
    fsmt_folder_path = dirname(fsmt_checkpoint_path)

    cls = fairseq.model_parallel.models.transformer.ModelParallelTransformerModel
    models = cls.hub_models()
    kwargs = {"bpe": "fastbpe", "tokenizer": "moses"}
    data_name_or_path = "."
    # note: since the model dump is old, fairseq has upgraded its model some
    # time later, and it does a whole lot of rewrites and splits on the saved
    # weights, therefore we can't use torch.load() directly on the model file.
    # see: upgrade_state_dict(state_dict) in fairseq_model.py
    print(f"using checkpoint {checkpoint_file}")
    chkpt = hub_utils.from_pretrained(
        fsmt_folder_path, checkpoint_file, data_name_or_path, archive_map=models, **kwargs
    )

    args = vars(chkpt["args"]["model"])

    src_lang = args["source_lang"]
    tgt_lang = args["target_lang"]

    data_root = dirname(pytorch_dump_folder_path)
    model_dir = basename(pytorch_dump_folder_path)

    # dicts
    src_dict_file = os.path.join(fsmt_folder_path, f"dict.{src_lang}.txt")
    tgt_dict_file = os.path.join(fsmt_folder_path, f"dict.{tgt_lang}.txt")

    src_dict = Dictionary.load(src_dict_file)
    src_vocab = rewrite_dict_keys(src_dict.indices)
    src_vocab_size = len(src_vocab)
    src_vocab_file = os.path.join(pytorch_dump_folder_path, "vocab-src.json")
    print(f"Generating {src_vocab_file} of {src_vocab_size} of {src_lang} records")
    with open(src_vocab_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(src_vocab, ensure_ascii=False, indent=json_indent))

    # detect whether this is a do_lower_case situation, which can be derived by checking whether we
    # have at least one uppercase letter in the source vocab
    do_lower_case = True
    for k in src_vocab.keys():
        if not k.islower():
            do_lower_case = False
            break

    tgt_dict = Dictionary.load(tgt_dict_file)
    tgt_vocab = rewrite_dict_keys(tgt_dict.indices)
    tgt_vocab_size = len(tgt_vocab)
    tgt_vocab_file = os.path.join(pytorch_dump_folder_path, "vocab-tgt.json")
    print(f"Generating {tgt_vocab_file} of {tgt_vocab_size} of {tgt_lang} records")
    with open(tgt_vocab_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(tgt_vocab, ensure_ascii=False, indent=json_indent))

    # merges_file (bpecodes)
    merges_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["merges_file"])
    for fn in ["bpecodes", "code"]:  # older fairseq called the merges file "code"
        fsmt_merges_file = os.path.join(fsmt_folder_path, fn)
        if os.path.exists(fsmt_merges_file):
            break
    with open(fsmt_merges_file, encoding="utf-8") as fin:
        merges = fin.read()
    merges = re.sub(r" \d+$", "", merges, 0, re.M)  # remove frequency number
    print(f"Generating {merges_file}")
    with open(merges_file, "w", encoding="utf-8") as fout:
        fout.write(merges)

    # model config
    fsmt_model_config_file = os.path.join(pytorch_dump_folder_path, "config.json")

    # validate bpe/tokenizer config, as currently it's hardcoded to moses+fastbpe -
    # may have to modify the tokenizer if a different type is used by a future model
    assert args["bpe"] == "fastbpe", f"need to extend tokenizer to support bpe={args['bpe']}"
    assert args["tokenizer"] == "moses", f"need to extend tokenizer to support tokenizer={args['tokenizer']}"

    model_conf = {
        "architectures": ["FSMTForConditionalGeneration"],
        "model_type": "fsmt",
        "activation_dropout": args["activation_dropout"],
        "activation_function": "relu",
        "attention_dropout": args["attention_dropout"],
        "d_model": args["decoder_embed_dim"],
        "dropout": args["dropout"],
        "init_std": 0.02,
        "max_position_embeddings": args["max_source_positions"],
        "num_hidden_layers": args["encoder_layers"],
        "src_vocab_size": src_vocab_size,
        "tgt_vocab_size": tgt_vocab_size,
        "langs": [src_lang, tgt_lang],
        "encoder_attention_heads": args["encoder_attention_heads"],
        "encoder_ffn_dim": args["encoder_ffn_embed_dim"],
        "encoder_layerdrop": args["encoder_layerdrop"],
        "encoder_layers": args["encoder_layers"],
        "decoder_attention_heads": args["decoder_attention_heads"],
        "decoder_ffn_dim": args["decoder_ffn_embed_dim"],
        "decoder_layerdrop": args["decoder_layerdrop"],
        "decoder_layers": args["decoder_layers"],
        "bos_token_id": 0,
        "pad_token_id": 1,
        "eos_token_id": 2,
        "is_encoder_decoder": True,
        "scale_embedding": not args["no_scale_embedding"],
        "tie_word_embeddings": args["share_all_embeddings"],
    }

    # good hparam defaults to start with
    model_conf["num_beams"] = 5
    model_conf["early_stopping"] = False
    if model_dir in best_score_hparams and "length_penalty" in best_score_hparams[model_dir]:
        model_conf["length_penalty"] = best_score_hparams[model_dir]["length_penalty"]
    else:
        model_conf["length_penalty"] = 1.0

    print(f"Generating {fsmt_model_config_file}")
    with open(fsmt_model_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(model_conf, ensure_ascii=False, indent=json_indent))

    # tokenizer config
    fsmt_tokenizer_config_file = os.path.join(pytorch_dump_folder_path, TOKENIZER_CONFIG_FILE)

    tokenizer_conf = {
        "langs": [src_lang, tgt_lang],
        "model_max_length": 1024,
        "do_lower_case": do_lower_case,
    }

    print(f"Generating {fsmt_tokenizer_config_file}")
    with open(fsmt_tokenizer_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(tokenizer_conf, ensure_ascii=False, indent=json_indent))

    # model
    model = chkpt["models"][0]
    model_state_dict = model.state_dict()

    # rename keys to start with 'model.'
    model_state_dict = OrderedDict(("model." + k, v) for k, v in model_state_dict.items())

    # remove unneeded keys
    ignore_keys = [
        "model.model",
        "model.encoder.version",
        "model.decoder.version",
        "model.encoder_embed_tokens.weight",
        "model.decoder_embed_tokens.weight",
        "model.encoder.embed_positions._float_tensor",
        "model.decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        model_state_dict.pop(k, None)

    config = FSMTConfig.from_pretrained(pytorch_dump_folder_path)
    model_new = FSMTForConditionalGeneration(config)

    # check that it loads ok
    model_new.load_state_dict(model_state_dict, strict=False)

    # save
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    print(f"Generating {pytorch_weights_dump_path}")
    torch.save(model_state_dict, pytorch_weights_dump_path)

    print("Conversion is done!")
    print("\nLast step is to upload the files to s3")
    print(f"cd {data_root}")
    print(f"transformers-cli upload {model_dir}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--fsmt_checkpoint_path",
        default=None,
        type=str,
        required=True,
        help=(
            "Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,"
            " bpecodes, etc."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_fsmt_checkpoint_to_pytorch(args.fsmt_checkpoint_path, args.pytorch_dump_folder_path)
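# Hypothetical invocation (the script filename and all paths are placeholders):
#
#     python convert_fsmt_checkpoint.py \
#         --fsmt_checkpoint_path fsmt_dump/wmt19.ru-en/model4.pt \
#         --pytorch_dump_folder_path data/wmt19-ru-en
#
# after which the converted model loads through the regular API:
#
#     from transformers import FSMTForConditionalGeneration
#     model = FSMTForConditionalGeneration.from_pretrained("data/wmt19-ru-en")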
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, MBartConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
    import tensorflow as tf

    from transformers import TFAutoModelForSeq2SeqLM, TFMBartForConditionalGeneration, TFMBartModel
@require_tf
class TFMBartModelTester:
    config_cls = MBartConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20, eos_token_id=2, pad_token_id=1, bos_token_id=0):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_mbart_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFMBartModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()
        past_key_values = past_key_values[1]
def prepare_mbart_inputs_dict(config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None):
    """Build the full model inputs dict, deriving any masks that were not passed in."""
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
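# A toy illustration of the masking convention above (pad_token_id assumed to
# be 1 and eos 2, as in the tester defaults): pad positions get 0, everything
# else 1, and the first decoder position is always kept visible.
#
#     import tensorflow as tf
#     input_ids = tf.constant([[5, 6, 2, 1, 1]])
#     mask = tf.cast(tf.math.not_equal(input_ids, 1), tf.int8)
#     print(mask.numpy())  # [[1 1 1 0 0]]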
@require_tf
class TFMBartModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else ()
    all_generative_model_classes = (TFMBartForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFMBartForConditionalGeneration,
            "feature-extraction": TFMBartModel,
            "summarization": TFMBartForConditionalGeneration,
            "text2text-generation": TFMBartForConditionalGeneration,
            "translation": TFMBartForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def is_pipeline_test_to_skip(self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name):
        if pipeline_test_casse_name != "FeatureExtractionPipelineTests":
            # Exception encountered when calling layer '...'
            return True
        return False

    def setUp(self):
        self.model_tester = TFMBartModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MBartConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_sentencepiece
@require_tokenizers
@require_tf
class TFMBartModelIntegrationTest(unittest.TestCase):
    src_text = [
        " UN Chief Says There Is No Military Solution in Syria",
    ]
    expected_text = [
        "Şeful ONU declară că nu există o soluţie militară în Siria",
    ]
    model_name = "facebook/mbart-large-en-ro"

    @cached_property
    def tokenizer(self):
        return AutoTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    def _assert_generated_batch_equal_expected(self, **tokenizer_kwargs):
        generated_words = self.translate_src_text(**tokenizer_kwargs)
        self.assertListEqual(self.expected_text, generated_words)

    def translate_src_text(self, **tokenizer_kwargs):
        model_inputs = self.tokenizer(self.src_text, **tokenizer_kwargs, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2
        )
        generated_words = self.tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
        return generated_words

    @slow
    def test_batch_generation_en_ro(self):
        self._assert_generated_batch_equal_expected()
"""simple docstring"""
def different_signs(num1: int, num2: int) -> bool:
    return num1 ^ num2 < 0
if __name__ == "__main__":
    import doctest

    doctest.testmod()
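# Why this works: in two's-complement representation, the XOR of two integers
# has its sign bit set exactly when the operands' sign bits differ, so no
# abs() calls or comparisons are needed:
#
#     >>> different_signs(1, -1)
#     True
#     >>> different_signs(1, 1)
#     False
#     >>> different_signs(-1_000_000, -1_000_000)
#     False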
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
    Text2TextGenerationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available
from .test_pipelines_common import ANY
if is_torch_available():
    import torch
@is_pipeline_test
class Text2TextGenerationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        generator = Text2TextGenerationPipeline(model=model, tokenizer=tokenizer)
        return generator, ["Something to write", "Something else"]

    def run_pipeline_test(self, generator, _):
        outputs = generator("Something there")
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        # These are encoder decoder, they don't just append to incoming string
        self.assertFalse(outputs[0]["generated_text"].startswith("Something there"))

        outputs = generator(["This is great !", "Something else"], num_return_sequences=2, do_sample=True)
        self.assertEqual(
            outputs,
            [
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
            ],
        )

        outputs = generator(
            ["This is great !", "Something else"], num_return_sequences=2, batch_size=2, do_sample=True
        )
        self.assertEqual(
            outputs,
            [
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
            ],
        )

        with self.assertRaises(ValueError):
            generator(4)

    @require_torch
    def test_small_model_pt(self):
        generator = pipeline("text2text-generation", model="patrickvonplaten/t5-tiny-random", framework="pt")
        # do_sample=False necessary for reproducibility
        outputs = generator("Something there", do_sample=False)
        self.assertEqual(outputs, [{"generated_text": ""}])

        num_return_sequences = 3
        outputs = generator(
            "Something there",
            num_return_sequences=num_return_sequences,
            num_beams=num_return_sequences,
        )
        target_outputs = [
            {"generated_text": "Beide Beide Beide Beide Beide Beide Beide Beide Beide"},
            {"generated_text": "Beide Beide Beide Beide Beide Beide Beide Beide"},
            {"generated_text": ""},
        ]
        self.assertEqual(outputs, target_outputs)

        outputs = generator("This is a test", do_sample=True, num_return_sequences=2, return_tensors=True)
        self.assertEqual(
            outputs,
            [
                {"generated_token_ids": ANY(torch.Tensor)},
                {"generated_token_ids": ANY(torch.Tensor)},
            ],
        )

        generator.tokenizer.pad_token_id = generator.model.config.eos_token_id
        generator.tokenizer.pad_token = "<pad>"
        outputs = generator(
            ["This is a test", "This is a second test"],
            do_sample=True,
            num_return_sequences=2,
            batch_size=2,
            return_tensors=True,
        )
        self.assertEqual(
            outputs,
            [
                [
                    {"generated_token_ids": ANY(torch.Tensor)},
                    {"generated_token_ids": ANY(torch.Tensor)},
                ],
                [
                    {"generated_token_ids": ANY(torch.Tensor)},
                    {"generated_token_ids": ANY(torch.Tensor)},
                ],
            ],
        )

    @require_tf
    def test_small_model_tf(self):
        generator = pipeline("text2text-generation", model="patrickvonplaten/t5-tiny-random", framework="tf")
        # do_sample=False necessary for reproducibility
        outputs = generator("Something there", do_sample=False)
        self.assertEqual(outputs, [{"generated_text": ""}])
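# Everyday usage of the pipeline exercised by these tests (the checkpoint id
# is illustrative; any seq2seq model such as a T5 variant works):
#
#     from transformers import pipeline
#
#     generator = pipeline("text2text-generation", model="t5-small")
#     print(generator("translate English to German: Hello, how are you?"))
#     # -> [{'generated_text': '...'}]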
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class ImageProcessor(BaseImageProcessor):
    # NOTE: the original class name was lost in this dump; "ImageProcessor" is
    # a neutral restoration of the standard resize/crop/rescale/normalize recipe.
    model_input_names = ["pixel_values"]

    def __init__(self, do_resize: bool = True, size: Optional[Dict[str, int]] = None, resample: PILImageResampling = PILImageResampling.BILINEAR, do_center_crop: bool = True, crop_size: Dict[str, int] = None, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, **kwargs):
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size)
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs):
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs):
        size = get_size_dict(size)
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: float, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs):
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(self, images: ImageInput, do_resize: Optional[bool] = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_center_crop: bool = None, crop_size: Dict[str, int] = None, do_rescale: Optional[bool] = None, rescale_factor: Optional[float] = None, do_normalize: Optional[bool] = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST, **kwargs):
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
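# A minimal sketch of using the processor above (class name as restored here;
# the shape follows from the 256-shortest-edge resize plus 224x224 center crop):
#
#     import numpy as np
#     from PIL import Image
#
#     image = Image.fromarray(np.random.randint(0, 255, (300, 400, 3), dtype=np.uint8))
#     processor = ImageProcessor()
#     batch = processor(image, return_tensors="np")
#     print(batch["pixel_values"].shape)  # (1, 3, 224, 224)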
"""simple docstring"""
import multiprocessing
import os
from typing import BinaryIO, Optional, Union
import fsspec
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class JsonDatasetReader(AbstractDatasetReader):
    def __init__(self, path_or_paths: NestedDataStructureLike[PathLike], split: Optional[NamedSplit] = None, features: Optional[Features] = None, cache_dir: str = None, keep_in_memory: bool = False, streaming: bool = False, field: Optional[str] = None, num_proc: Optional[int] = None, **kwargs):
        super().__init__(
            path_or_paths, split=split, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming, num_proc=num_proc, **kwargs
        )
        self.field = field
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Json(
            cache_dir=cache_dir, data_files=path_or_paths, features=features, field=field, **kwargs
        )

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config, download_mode=download_mode, verification_mode=verification_mode, base_path=base_path, num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset


class JsonDatasetWriter:
    def __init__(self, dataset: Dataset, path_or_buf: Union[PathLike, BinaryIO], batch_size: Optional[int] = None, num_proc: Optional[int] = None, **to_json_kwargs):
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f"num_proc {num_proc} must be an integer > 0.")

        self.dataset = dataset
        self.path_or_buf = path_or_buf
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.encoding = "utf-8"
        self.to_json_kwargs = to_json_kwargs

    def write(self):
        _ = self.to_json_kwargs.pop("path_or_buf", None)
        orient = self.to_json_kwargs.pop("orient", "records")
        lines = self.to_json_kwargs.pop("lines", True if orient == "records" else False)
        index = self.to_json_kwargs.pop("index", False if orient in ["split", "table"] else True)
        compression = self.to_json_kwargs.pop("compression", None)

        if compression not in [None, "infer", "gzip", "bz2", "xz"]:
            raise NotImplementedError(f"`datasets` currently does not support {compression} compression")

        if isinstance(self.path_or_buf, (str, bytes, os.PathLike)):
            with fsspec.open(self.path_or_buf, "wb", compression=compression) as buffer:
                written = self._write(file_obj=buffer, orient=orient, lines=lines, index=index, **self.to_json_kwargs)
        else:
            if compression:
                raise NotImplementedError(
                    f"The compression parameter is not supported when writing to a buffer, but compression={compression}"
                    " was passed. Please provide a local path instead."
                )
            written = self._write(
                file_obj=self.path_or_buf, orient=orient, lines=lines, index=index, **self.to_json_kwargs
            )
        return written

    def _batch_json(self, args):
        offset, orient, lines, index, to_json_kwargs = args

        batch = query_table(
            table=self.dataset.data, key=slice(offset, offset + self.batch_size), indices=self.dataset._indices,
        )
        json_str = batch.to_pandas().to_json(
            path_or_buf=None, orient=orient, lines=lines, index=index, **to_json_kwargs
        )
        if not json_str.endswith("\n"):
            json_str += "\n"
        return json_str.encode(self.encoding)

    def _write(self, file_obj: BinaryIO, orient, lines, index, **to_json_kwargs):
        written = 0
        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0, len(self.dataset), self.batch_size), unit="ba", disable=not logging.is_progress_bar_enabled(), desc="Creating json from Arrow format",
            ):
                json_str = self._batch_json((offset, orient, lines, index, to_json_kwargs))
                written += file_obj.write(json_str)
        else:
            num_rows, batch_size = len(self.dataset), self.batch_size
            with multiprocessing.Pool(self.num_proc) as pool:
                for json_str in logging.tqdm(
                    pool.imap(
                        self._batch_json, [(offset, orient, lines, index, to_json_kwargs) for offset in range(0, num_rows, batch_size)],
                    ), total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size, unit="ba", disable=not logging.is_progress_bar_enabled(), desc="Creating json from Arrow format",
                ):
                    written += file_obj.write(json_str)
        return written
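# These classes back the public round-trip API, e.g. (file name illustrative):
#
#     from datasets import Dataset
#
#     ds = Dataset.from_dict({"a": [1, 2, 3]})
#     ds.to_json("tmp.json", lines=True)      # -> JsonDatasetWriter
#     ds2 = Dataset.from_json("tmp.json")     # -> JsonDatasetReader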
"""simple docstring"""
def jaccard_similarity(set_a, set_b, alternative_union=False):
    """Jaccard index |A ∩ B| / |A ∪ B|; with alternative_union, |A| + |B| is used as the denominator."""
    if isinstance(set_a, set) and isinstance(set_b, set):
        intersection = len(set_a.intersection(set_b))

        if alternative_union:
            union = len(set_a) + len(set_b)
        else:
            union = len(set_a.union(set_b))

        return intersection / union

    if isinstance(set_a, (list, tuple)) and isinstance(set_b, (list, tuple)):
        intersection = [element for element in set_a if element in set_b]

        if alternative_union:
            union = len(set_a) + len(set_b)
            return len(intersection) / union
        else:
            union = set_a + [element for element in set_b if element not in set_a]
            return len(intersection) / len(union)

    return None
if __name__ == "__main__":
    set_a = {"a", "b", "c", "d", "e"}
    set_b = {"c", "d", "e", "f", "h", "i"}
    print(jaccard_similarity(set_a, set_b))
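    # |A ∩ B| = 3 and |A ∪ B| = 8, so the line above prints 0.375; the
    # non-standard "alternative union" |A| + |B| = 11 yields 3/11 instead:
    print(jaccard_similarity(set_a, set_b, alternative_union=True))  # ≈ 0.2727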
import itertools
from dataclasses import dataclass
from typing import List, Optional
import pyarrow as pa
import pyarrow.parquet as pq
import datasets
from datasets.table import table_cast
logger = datasets.utils.logging.get_logger(__name__)


@dataclass
class ParquetConfig(datasets.BuilderConfig):
    batch_size: int = 10_000
    columns: Optional[List[str]] = None
    features: Optional[datasets.Features] = None


class Parquet(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = ParquetConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            # Infer features if they are stored in the arrow schema
            if self.info.features is None:
                for file in itertools.chain.from_iterable(files):
                    with open(file, "rb") as f:
                        self.info.features = datasets.Features.from_arrow_schema(pq.read_schema(f))
                    break
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table):
        if self.info.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.info.features.arrow_schema)
        return pa_table

    def _generate_tables(self, files):
        schema = self.info.features.arrow_schema if self.info.features is not None else None
        if self.info.features is not None and self.config.columns is not None:
            if sorted(field.name for field in schema) != sorted(self.config.columns):
                raise ValueError(
                    f"Tried to load parquet data with columns '{self.config.columns}' with mismatching features '{self.info.features}'"
                )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            with open(file, "rb") as f:
                parquet_file = pq.ParquetFile(f)
                try:
                    for batch_idx, record_batch in enumerate(
                        parquet_file.iter_batches(batch_size=self.config.batch_size, columns=self.config.columns)
                    ):
                        pa_table = pa.Table.from_batches([record_batch])
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield f"{file_idx}_{batch_idx}", self._cast_table(pa_table)
                except ValueError as e:
                    logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                    raise
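# This builder is what load_dataset("parquet", ...) dispatches to; a sketch of
# typical usage (the file path is illustrative, and `columns` is forwarded to
# the ParquetConfig defined above):
#
#     from datasets import load_dataset
#
#     ds = load_dataset("parquet", data_files={"train": "data/train.parquet"}, columns=["text"])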
red = 0  # The first color of the flag.
white = 1  # The second color of the flag.
blue = 2  # The third color of the flag.
colors = (red, white, blue)
def dutch_national_flag_sort(sequence: list) -> list:
    """Sort a sequence of color codes in place with a single three-way-partition pass."""
    if not sequence:
        return []
    if len(sequence) == 1:
        return list(sequence)
    low = 0
    high = len(sequence) - 1
    mid = 0
    while mid <= high:
        if sequence[mid] == colors[0]:
            sequence[low], sequence[mid] = sequence[mid], sequence[low]
            low += 1
            mid += 1
        elif sequence[mid] == colors[1]:
            mid += 1
        elif sequence[mid] == colors[2]:
            sequence[mid], sequence[high] = sequence[high], sequence[mid]
            high -= 1
        else:
            msg = f"The elements inside the sequence must contain only {colors} values"
            raise ValueError(msg)
    return sequence
if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_input = input("Enter numbers separated by commas:\n").strip()
    unsorted = [int(item.strip()) for item in user_input.split(",")]
    print(f"{dutch_national_flag_sort(unsorted)}")
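# The partition runs in a single O(n) pass with O(1) extra space, e.g. (values
# are the color codes 0=red, 1=white, 2=blue defined above):
#
#     >>> dutch_national_flag_sort([2, 0, 1, 0, 2, 1, 0])
#     [0, 0, 0, 1, 1, 2, 2]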
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class TvltFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["audio_values", "audio_mask"]

    def __init__(self, spectrogram_length=2048, num_channels=1, patch_size=[16, 16], feature_size=128, sampling_rate=44100, hop_length_to_sampling_rate=86, n_fft=2048, padding_value=0.0, **kwargs):
        super().__init__(
            feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs
        )
        self.spectrogram_length = spectrogram_length
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.freq_len = feature_size // self.patch_size[1]
        self.n_fft = n_fft
        self.hop_length = sampling_rate // hop_length_to_sampling_rate
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2, num_mel_filters=feature_size, min_frequency=0.0, max_frequency=22050.0, sampling_rate=sampling_rate, norm="slaney", mel_scale="slaney",
        ).T

    def _np_extract_fbank_features(self, waveform: np.array):
        log_spec = spectrogram(
            waveform, window_function(self.n_fft, "hann"), frame_length=self.n_fft, hop_length=self.hop_length, power=2.0, mel_filters=self.mel_filters.T, log_mel="dB", db_range=80.0,
        )
        log_spec = log_spec[:, :-1]
        log_spec = log_spec - 20.0
        log_spec = np.clip(log_spec / 40.0, -2.0, 0.0) + 1.0
        return log_spec

    def __call__(self, raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]], return_tensors: Optional[Union[str, TensorType]] = None, return_attention_mask: Optional[bool] = True, sampling_rate: Optional[int] = None, resample: bool = False, mask_audio: bool = False, **kwargs):
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    "This feature extractor is set to support sampling rate"
                    f" of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled"
                    f" with {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech]).T]

        # Convert audio signals to log mel spectrograms, truncate by time axis
        audio_features = [
            self._np_extract_fbank_features(waveform.squeeze()).T[: self.spectrogram_length] for waveform in raw_speech
        ]
        if isinstance(audio_features[0], list):
            audio_features = [np.asarray(feature, dtype=np.float32) for feature in audio_features]

        # Create audio attention mask
        max_patch_len = max(
            [ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len for feature in audio_features]
        )  # The maximum number of audio patches in a batch
        if return_attention_mask:
            audio_mask = [
                (ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [1]
                + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [0]
                for feature in audio_features
            ]
            audio_mask = np.array(audio_mask).astype(np.float32)

        # convert into correct format for padding
        max_time_len = max_patch_len // self.freq_len * self.patch_size[0]  # The maximum audio size in a batch
        padded_audio_features = np.ones([len(audio_features), 1, max_time_len, self.feature_size]).astype(np.float32)
        padded_audio_features = padded_audio_features * self.padding_value
        for i in range(len(audio_features)):
            feature = audio_features[i]
            padded_audio_features[i, :, : feature.shape[0], :] = feature

        # return as BatchFeature
        if return_attention_mask:
            data = {"audio_values": padded_audio_features, "audio_mask": audio_mask}
        else:
            data = {"audio_values": padded_audio_features}

        encoded_inputs = BatchFeature(data=data, tensor_type=return_tensors)
        return encoded_inputs
# Function to print upper half of diamond (pyramid)
def floyd(n):
    for i in range(0, n):
        for _ in range(0, n - i - 1):  # printing spaces
            print(" ", end="")
        for _ in range(0, i + 1):  # printing stars
            print("* ", end="")
        print()


# Function to print lower half of diamond (reverse pyramid)
def reverse_floyd(n):
    for i in range(n, 0, -1):
        for _ in range(i, 0, -1):  # printing stars
            print("* ", end="")
        print()
        for _ in range(n - i + 1, 0, -1):  # printing spaces
            print(" ", end="")


def pretty_print(n):
    if n <= 0:
        print("       ...       ....        nothing printing :(")
        return
    floyd(n)  # upper half
    reverse_floyd(n)  # lower half
if __name__ == "__main__":
    print(r"""| /\ | |- | |- |--| |\ /| |-""")
    print(r"""|/ \| |- |_ |_ |__| | \/ | |_""")
    K = 1
    while K:
        user_number = int(input("enter the number and , and see the magic : "))
        print()
        pretty_print(user_number)
        K = int(input("press 0 to exit... and 1 to continue..."))
    print("Good Bye...")
'''simple docstring'''
import argparse
import torch
from transformers import GPT2Config, GPT2Model, load_tf_weights_in_gpt2
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_gpt2_checkpoint_to_pytorch(gpt2_checkpoint_path, gpt2_config_file, pytorch_dump_folder_path):
    # Construct model
    if gpt2_config_file == "":
        config = GPT2Config()
    else:
        config = GPT2Config.from_json_file(gpt2_config_file)
    model = GPT2Model(config)

    # Load weights from numpy
    load_tf_weights_in_gpt2(model, config, gpt2_checkpoint_path)

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--gpt2_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--gpt2_config_file",
        default="",
        type=str,
        help=(
            "An optional config json file corresponding to the pre-trained OpenAI model. \n"
            "This specifies the model architecture."
        ),
    )
    args = parser.parse_args()
    convert_gpt2_checkpoint_to_pytorch(args.gpt2_checkpoint_path, args.gpt2_config_file, args.pytorch_dump_folder_path)
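# Hypothetical invocation (the script filename and all paths are placeholders):
#
#     python convert_gpt2_checkpoint.py \
#         --gpt2_checkpoint_path tf_ckpt/model.ckpt \
#         --pytorch_dump_folder_path out/gpt2 \
#         --gpt2_config_file tf_ckpt/config.json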
'''simple docstring'''
def count_divisors(n):
    """Count the divisors of n via its prime factorization."""
    n_divisors = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        n_divisors *= multiplicity + 1
        i += 1
    if n > 1:
        n_divisors *= 2
    return n_divisors


def solution():
    """Return the first triangle number with more than 500 divisors (Project Euler 12)."""
    t_num = 1
    i = 1
    while True:
        i += 1
        t_num += i
        if count_divisors(t_num) > 500:
            break
    return t_num
if __name__ == "__main__":
    print(solution())
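# Sanity check against the Project Euler statement: 28 = 1+2+3+4+5+6+7 is the
# first triangle number with more than five divisors (1, 2, 4, 7, 14, 28):
#
#     >>> count_divisors(28)
#     6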
from ..utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
)
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ..utils.dummy_pt_objects import *  # noqa F403
else:
    from .scheduling_consistency_models import CMStochasticIterativeScheduler
    from .scheduling_ddim import DDIMScheduler
    from .scheduling_ddim_inverse import DDIMInverseScheduler
    from .scheduling_ddim_parallel import DDIMParallelScheduler
    from .scheduling_ddpm import DDPMScheduler
    from .scheduling_ddpm_parallel import DDPMParallelScheduler
    from .scheduling_deis_multistep import DEISMultistepScheduler
    from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler
    from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler
    from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler
    from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler
    from .scheduling_euler_discrete import EulerDiscreteScheduler
    from .scheduling_heun_discrete import HeunDiscreteScheduler
    from .scheduling_ipndm import IPNDMScheduler
    from .scheduling_k_dpm_2_ancestral_discrete import KDPM2AncestralDiscreteScheduler
    from .scheduling_k_dpm_2_discrete import KDPM2DiscreteScheduler
    from .scheduling_karras_ve import KarrasVeScheduler
    from .scheduling_pndm import PNDMScheduler
    from .scheduling_repaint import RePaintScheduler
    from .scheduling_sde_ve import ScoreSdeVeScheduler
    from .scheduling_sde_vp import ScoreSdeVpScheduler
    from .scheduling_unclip import UnCLIPScheduler
    from .scheduling_unipc_multistep import UniPCMultistepScheduler
    from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin
    from .scheduling_vq_diffusion import VQDiffusionScheduler

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ..utils.dummy_flax_objects import *  # noqa F403
else:
    from .scheduling_ddim_flax import FlaxDDIMScheduler
    from .scheduling_ddpm_flax import FlaxDDPMScheduler
    from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler
    from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler
    from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler
    from .scheduling_pndm_flax import FlaxPNDMScheduler
    from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler
    from .scheduling_utils_flax import (
        FlaxKarrasDiffusionSchedulers,
        FlaxSchedulerMixin,
        FlaxSchedulerOutput,
        broadcast_to_shape_from_left,
    )

try:
    if not (is_torch_available() and is_scipy_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ..utils.dummy_torch_and_scipy_objects import *  # noqa F403
else:
    from .scheduling_lms_discrete import LMSDiscreteScheduler

try:
    if not (is_torch_available() and is_torchsde_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ..utils.dummy_torch_and_torchsde_objects import *  # noqa F403
else:
    from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
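# The shared SchedulerMixin API is what makes schedulers hot-swappable on a
# pipeline; a minimal sketch (the model id is illustrative):
#
#     from diffusers import DiffusionPipeline, DPMSolverMultistepScheduler
#
#     pipe = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
#     pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)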
def neville_interpolate(x_points: list, y_points: list, x0: float) -> list:
    """Evaluate the interpolating polynomial at x0 via Neville's scheme; returns [value, table]."""
    n = len(x_points)
    q = [[0] * n for i in range(n)]
    for i in range(n):
        q[i][1] = y_points[i]

    for i in range(2, n):
        for j in range(i, n):
            q[j][i] = (
                (x0 - x_points[j - i + 1]) * q[j][i - 1]
                - (x0 - x_points[j]) * q[j - 1][i - 1]
            ) / (x_points[j] - x_points[j - i + 1])

    return [q[n - 1][n - 1], q]
if __name__ == "__main__":
    import doctest

    doctest.testmod()
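# For instance, interpolating through points of y = x**2 recovers the exact
# value at x0 = 5 (the second return value is the full Neville table):
#
#     >>> neville_interpolate([1, 2, 3, 4, 6], [1, 4, 9, 16, 36], 5)[0]
#     25.0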
'''simple docstring'''
import flax.linen as nn
import jax
import jax.numpy as jnp
class FlaxUpsample2D(nn.Module):
    out_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv = nn.Conv(
            self.out_channels, kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype,
        )

    def __call__(self, hidden_states):
        batch, height, width, channels = hidden_states.shape
        hidden_states = jax.image.resize(
            hidden_states, shape=(batch, height * 2, width * 2, channels), method="nearest",
        )
        hidden_states = self.conv(hidden_states)
        return hidden_states


class FlaxDownsample2D(nn.Module):
    out_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv = nn.Conv(
            self.out_channels, kernel_size=(3, 3), strides=(2, 2), padding=((1, 1), (1, 1)), dtype=self.dtype,
        )

    def __call__(self, hidden_states):
        # pad = ((0, 0), (0, 1), (0, 1), (0, 0))  # pad height and width dim
        # hidden_states = jnp.pad(hidden_states, pad_width=pad)
        hidden_states = self.conv(hidden_states)
        return hidden_states


class FlaxResnetBlock2D(nn.Module):
    in_channels: int
    out_channels: int = None
    dropout_prob: float = 0.0
    use_nin_shortcut: bool = None
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        out_channels = self.in_channels if self.out_channels is None else self.out_channels

        self.norm1 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.conv1 = nn.Conv(
            out_channels, kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype,
        )
        self.time_emb_proj = nn.Dense(out_channels, dtype=self.dtype)
        self.norm2 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.dropout = nn.Dropout(self.dropout_prob)
        self.conv2 = nn.Conv(
            out_channels, kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype,
        )

        use_nin_shortcut = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut

        self.conv_shortcut = None
        if use_nin_shortcut:
            self.conv_shortcut = nn.Conv(
                out_channels, kernel_size=(1, 1), strides=(1, 1), padding="VALID", dtype=self.dtype,
            )

    def __call__(self, hidden_states, temb, deterministic=True):
        residual = hidden_states
        hidden_states = self.norm1(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.conv1(hidden_states)

        temb = self.time_emb_proj(nn.swish(temb))
        temb = jnp.expand_dims(jnp.expand_dims(temb, 1), 1)
        hidden_states = hidden_states + temb

        hidden_states = self.norm2(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.dropout(hidden_states, deterministic)
        hidden_states = self.conv2(hidden_states)

        if self.conv_shortcut is not None:
            residual = self.conv_shortcut(residual)

        return hidden_states + residual
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
    from PIL import Image

    from transformers import CLIPSegProcessor, ViTImageProcessor
@require_vision
class CLIPSegProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = CLIPSegProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = CLIPSegProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = CLIPSegProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = CLIPSegProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, ViTImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, ViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = CLIPSegProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = CLIPSegProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)
        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_visual_prompt(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)
        image_input = self.prepare_image_inputs()
        visual_prompt_input = self.prepare_image_inputs()

        inputs = processor(images=image_input, visual_prompt=visual_prompt_input)
        self.assertListEqual(list(inputs.keys()), ["pixel_values", "conditional_pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)
| 220
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_git": ["GIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "GitConfig", "GitVisionConfig"],
"processing_git": ["GitProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_git"] = [
"GIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"GitForCausalLM",
"GitModel",
"GitPreTrainedModel",
"GitVisionModel",
]
if TYPE_CHECKING:
from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
from .processing_git import GitProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_git import (
GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GitForCausalLM,
GitModel,
GitPreTrainedModel,
GitVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 184
|
import inspect
import unittest
from transformers import ViTHybridConfig
from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel
from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class ViTHybridModelTester:
    def __init__(
        self,
        parent,
        batch_size=1_3,
        image_size=6_4,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=3_2,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=3_7,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=1_0,
        initializer_range=0.0_2,
        backbone_featmap_shape=[1, 1_6, 4, 4],
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.backbone_featmap_shape = backbone_featmap_shape
        # in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        # the number of patches is based on the feature map of the backbone, which by default uses an output stride
        # of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size
        num_patches = (self.image_size // 3_2) ** 2
        self.seq_length = num_patches + 1
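        # worked example with the defaults above: image_size=64 and an output stride
        # of 32 give (64 // 32) ** 2 = 4 patches, so seq_length = 4 + 1 = 5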
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
        backbone_config = {
            '''global_padding''': '''same''',
            '''layer_type''': '''bottleneck''',
            '''depths''': [3, 4, 9],
            '''out_features''': ['''stage1''', '''stage2''', '''stage3'''],
            '''embedding_dynamic_padding''': True,
            '''hidden_sizes''': [4, 8, 1_6, 3_2],
            '''num_groups''': 2,
        }
        return ViTHybridConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, backbone_featmap_shape=self.backbone_featmap_shape, backbone_config=backbone_config, )
    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTHybridModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = ViTHybridForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_torch
class ViTHybridModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"""feature-extraction""": ViTHybridModel, """image-classification""": ViTHybridForImageClassification}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = ViTHybridModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTHybridConfig, has_text_modality=False, hidden_size=3_7)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason='''ViT does not use inputs_embeds''')
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ['''pixel_values''']
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            # Skip the check for the backbone
            backbone_params = []
            for name, module in model.named_modules():
                if module.__class__.__name__ == "ViTHybridPatchEmbeddings":
                    backbone_params = [F'''{name}.{key}''' for key in module.state_dict().keys()]
                    break

            for name, param in model.named_parameters():
                if param.requires_grad:
                    if name in backbone_params:
                        continue
                    self.assertIn(
                        ((param.data.mean() * 1E9).round() / 1E9).item(), [0.0, 1.0], msg=F'''Parameter {name} of model {model_class} seems not properly initialized''', )

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTHybridModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''')
    return image
@require_torch
@require_vision
class ViTHybridModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(
            torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='''pt''').to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1_0_0_0))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.9_0_9_0, -0.4_9_9_3, -0.2_3_8_9]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1E-4))

    @slow
    @require_accelerate
    def test_accelerate_inference(self):
        image_processor = ViTHybridImageProcessor.from_pretrained('''google/vit-hybrid-base-bit-384''')
        model = ViTHybridForImageClassification.from_pretrained('''google/vit-hybrid-base-bit-384''', device_map='''auto''')

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='''pt''')
        outputs = model(**inputs)
        logits = outputs.logits
        # model predicts one of the 1000 ImageNet classes
        predicted_class_idx = logits.argmax(-1).item()

        self.assertEqual(model.config.id2label[predicted_class_idx], '''tabby, tabby cat''')
| 336
| 0
|
'''simple docstring'''
import logging
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEncoder,
BertModel,
BertPreTrainedModel,
)
a : List[Any] = logging.getLogger(__name__)
class BertEncoderWithPabee(BertEncoder):
    def adaptive_forward(self, hidden_states, current_layer, attention_mask=None, head_mask=None):
        layer_outputs = self.layer[current_layer](hidden_states, attention_mask, head_mask[current_layer])

        hidden_states = layer_outputs[0]

        return hidden_states


@add_start_docstrings(
    'The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top.' , BERT_START_DOCSTRING , )
class BertModelWithPabee(BertModel):
    def __init__(self, config):
        super().__init__(config)

        self.encoder = BertEncoderWithPabee(config)

        self.init_weights()
        self.patience = 0
        self.inference_instances_num = 0
        self.inference_layers_num = 0

        self.regression_threshold = 0

    def set_regression_threshold(self, threshold):
        self.regression_threshold = threshold

    def set_patience(self, patience):
        self.patience = patience

    def reset_stats(self):
        self.inference_instances_num = 0
        self.inference_layers_num = 0

    def log_stats(self):
        avg_inf_layers = self.inference_layers_num / self.inference_instances_num
        message = (
            f'''*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up ='''
            f''' {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***'''
        )
        print(message)
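    # worked example (hypothetical numbers): with 12 hidden layers and an average
    # of 6.0 layers actually executed, log_stats reports a speed up of
    # 1 - 6.0 / 12 = 0.50, i.e. roughly half the inference compute saved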
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        output_dropout=None,
        output_layers=None,
        regression=False,
    ):
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("""You cannot specify both input_ids and inputs_embeds at the same time""")
        elif input_ids is not None:
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("""You have to specify either input_ids or inputs_embeds""")

        device = input_ids.device if input_ids is not None else inputs_embeds.device

        if attention_mask is None:
            attention_mask = torch.ones(input_shape, device=device)
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)

        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape, device)

        # If a 2D or 3D attention mask is provided for the cross-attention,
        # we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if self.config.is_decoder and encoder_hidden_states is not None:
            encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
            encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
            if encoder_attention_mask is None:
                encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
            encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
        else:
            encoder_extended_attention_mask = None

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        embedding_output = self.embeddings(
            input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds)
        encoder_outputs = embedding_output

        if self.training:
            res = []
            for i in range(self.config.num_hidden_layers):
                encoder_outputs = self.encoder.adaptive_forward(
                    encoder_outputs, current_layer=i, attention_mask=extended_attention_mask, head_mask=head_mask)

                pooled_output = self.pooler(encoder_outputs)
                logits = output_layers[i](output_dropout(pooled_output))
                res.append(logits)
        elif self.patience == 0:  # Use all layers for inference
            encoder_outputs = self.encoder(
                embedding_output, attention_mask=extended_attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_extended_attention_mask, )
            pooled_output = self.pooler(encoder_outputs[0])
            res = [output_layers[self.config.num_hidden_layers - 1](pooled_output)]
        else:
            patient_counter = 0
            patient_result = None
            calculated_layer_num = 0
            for i in range(self.config.num_hidden_layers):
                calculated_layer_num += 1
                encoder_outputs = self.encoder.adaptive_forward(
                    encoder_outputs, current_layer=i, attention_mask=extended_attention_mask, head_mask=head_mask)

                pooled_output = self.pooler(encoder_outputs)
                logits = output_layers[i](pooled_output)
                if regression:
                    labels = logits.detach()
                    if patient_result is not None:
                        patient_result = patient_result.detach()
                    if (patient_result is not None) and torch.abs(patient_result - labels) < self.regression_threshold:
                        patient_counter += 1
                    else:
                        patient_counter = 0
                else:
                    labels = logits.detach().argmax(dim=1)
                    if patient_result is not None:
                        patient_labels = patient_result.detach().argmax(dim=1)
                    if (patient_result is not None) and torch.all(labels.eq(patient_labels)):
                        patient_counter += 1
                    else:
                        patient_counter = 0
                patient_result = logits
                if patient_counter == self.patience:
                    break
            res = [patient_result]
            self.inference_layers_num += calculated_layer_num
            self.inference_instances_num += 1

        return res
@add_start_docstrings(
    'Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of\n the pooled output) e.g. for GLUE tasks. ' , BERT_START_DOCSTRING , )
class BertForSequenceClassificationWithPabee(BertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.bert = BertModelWithPabee(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifiers = nn.ModuleList(
            [nn.Linear(config.hidden_size, self.config.num_labels) for _ in range(config.num_hidden_layers)])

        self.init_weights()

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
    ):
        logits = self.bert(
            input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_dropout=self.dropout, output_layers=self.classifiers, regression=self.num_labels == 1, )

        outputs = (logits[-1],)

        if labels is not None:
            total_loss = None
            total_weights = 0
            for ix, logits_item in enumerate(logits):
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    loss = loss_fct(logits_item.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    loss = loss_fct(logits_item.view(-1, self.num_labels), labels.view(-1))
                if total_loss is None:
                    total_loss = loss
                else:
                    total_loss += loss * (ix + 1)
                total_weights += ix + 1
            outputs = (total_loss / total_weights,) + outputs

        return outputs
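# minimal usage sketch (the checkpoint name is hypothetical, not part of this module):
#
#     model = BertForSequenceClassificationWithPabee.from_pretrained("bert-base-uncased")
#     model.bert.set_patience(3)  # exit once 3 consecutive internal classifiers agree
#     model.eval()
#     outputs = model(input_ids=input_ids)  # returns (logits_of_the_exit_layer,)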
| 338
|
'''simple docstring'''
import logging
import os
from typing import List, Tuple
import numpy as np
import psutil
import torch
import torch.distributed as dist
from transformers import RagRetriever
a : List[str] = logging.getLogger(__name__)
class RagPyTorchDistributedRetriever(RagRetriever):
    def __init__(self, config, question_encoder_tokenizer, generator_tokenizer, index=None):
        super().__init__(
            config, question_encoder_tokenizer=question_encoder_tokenizer, generator_tokenizer=generator_tokenizer, index=index, init_retrieval=False, )
        self.process_group = None

    def init_retrieval(self, distributed_port):
        logger.info("""initializing retrieval""")

        # initializing a separate process group for retrieval as the default
        # nccl backend doesn't support gather/scatter operations while gloo
        # is too slow to replace nccl for the core gpu communication
        if dist.is_initialized():
            logger.info("""dist initialized""")
            # needs to be set manually
            os.environ["GLOO_SOCKET_IFNAME"] = self._infer_socket_ifname()
            # avoid clash with the NCCL port
            os.environ["MASTER_PORT"] = str(distributed_port + 1)
            self.process_group = dist.new_group(ranks=None, backend="""gloo""")

        # initialize retriever only on the main worker
        if not dist.is_initialized() or self._is_main():
            logger.info("""dist not initialized / main""")
            self.index.init_index()

        # all processes wait until the retriever is initialized by the main process
        if dist.is_initialized():
            torch.distributed.barrier(group=self.process_group)
    def _is_main(self):
        return dist.get_rank(group=self.process_group) == 0

    def _scattered(self, scatter_list, target_shape, target_type=torch.float32):
        target_tensor = torch.empty(target_shape, dtype=target_type)
        dist.scatter(target_tensor, src=0, scatter_list=scatter_list, group=self.process_group)
        return target_tensor

    def _infer_socket_ifname(self):
        addrs = psutil.net_if_addrs()
        # a hacky way to deal with varying network interface names
        ifname = next((addr for addr in addrs if addr.startswith("""e""")), None)
        return ifname
    def retrieve(self, question_hidden_states, n_docs) -> Tuple[np.ndarray, List[dict]]:
        # single GPU training
        if not dist.is_initialized():
            doc_ids, retrieved_doc_embeds = self._main_retrieve(question_hidden_states, n_docs)
            return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids)

        # distributed training
        world_size = dist.get_world_size(group=self.process_group)

        # gather logic
        gather_list = None
        if self._is_main():
            gather_list = [torch.empty(question_hidden_states.shape, dtype=torch.float32) for _ in range(world_size)]
        dist.gather(torch.tensor(question_hidden_states), dst=0, gather_list=gather_list, group=self.process_group)

        # scatter logic
        n_queries = question_hidden_states.shape[0]
        scatter_ids = []
        scatter_vectors = []
        if self._is_main():
            assert len(gather_list) == world_size
            doc_ids, retrieved_doc_embeds = self._main_retrieve(torch.cat(gather_list).numpy(), n_docs)
            doc_ids, retrieved_doc_embeds = torch.tensor(doc_ids), torch.tensor(retrieved_doc_embeds)
            scatter_ids = self._chunk_tensor(doc_ids, n_queries)
            scatter_vectors = self._chunk_tensor(retrieved_doc_embeds, n_queries)
        doc_ids = self._scattered(scatter_ids, [n_queries, n_docs], target_type=torch.int64)
        retrieved_doc_embeds = self._scattered(scatter_vectors, [n_queries, n_docs, question_hidden_states.shape[1]])

        return retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(doc_ids)
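    # shape sketch (assumed sizes): with world_size=2, n_docs=5 and a per-worker
    # question_hidden_states of shape (8, 768), rank 0 gathers a (16, 768) batch,
    # retrieves once, then scatters back per-worker doc_ids of shape (8, 5) and
    # retrieved_doc_embeds of shape (8, 5, 768)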
| 338
| 1
|
"""simple docstring"""
def is_isogram(string: str) -> bool:
    """simple docstring"""
    if not all(x.isalpha() for x in string):
        raise ValueError('''String must only contain alphabetic characters.''')

    letters = sorted(string.lower())
    return len(letters) == len(set(letters))
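# examples: is_isogram("Uncopyrightable") is True (no repeated letters), while
# is_isogram("allowance") is False ("a" and "l" both repeat)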
if __name__ == "__main__":
    input_str = input("""Enter a string """).strip()

    isogram = is_isogram(input_str)
    print(f'''{input_str} is {'an' if isogram else 'not an'} isogram.''')
| 91
|
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_electra import ElectraTokenizer
_a : List[Any] = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
_a : Union[str, Any] = {
'vocab_file': {
'google/electra-small-generator': (
'https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt'
),
'google/electra-base-generator': 'https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt',
'google/electra-large-generator': (
'https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt'
),
'google/electra-small-discriminator': (
'https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt'
),
'google/electra-base-discriminator': (
'https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt'
),
'google/electra-large-discriminator': (
'https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'google/electra-small-generator': (
'https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json'
),
'google/electra-base-generator': (
'https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json'
),
'google/electra-large-generator': (
'https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json'
),
'google/electra-small-discriminator': (
'https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json'
),
'google/electra-base-discriminator': (
'https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json'
),
'google/electra-large-discriminator': (
'https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json'
),
},
}
_a : Optional[Any] = {
'google/electra-small-generator': 512,
'google/electra-base-generator': 512,
'google/electra-large-generator': 512,
'google/electra-small-discriminator': 512,
'google/electra-base-discriminator': 512,
'google/electra-large-discriminator': 512,
}
_a : Any = {
'google/electra-small-generator': {'do_lower_case': True},
'google/electra-base-generator': {'do_lower_case': True},
'google/electra-large-generator': {'do_lower_case': True},
'google/electra-small-discriminator': {'do_lower_case': True},
'google/electra-base-discriminator': {'do_lower_case': True},
'google/electra-large-discriminator': {'do_lower_case': True},
}
class ElectraTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ElectraTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs, )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("""lowercase""", do_lower_case) != do_lower_case
            or normalizer_state.get("""strip_accents""", strip_accents) != strip_accents
            or normalizer_state.get("""handle_chinese_chars""", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("""type"""))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
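    # worked example: for token_ids_0=[5, 6] and token_ids_1=[7], the sequence is
    # [CLS] 5 6 [SEP] 7 [SEP], so the token type ids come out as [0, 0, 0, 0, 1, 1]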
    def save_vocabulary(self, save_directory, filename_prefix=None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 44
| 0
|
'''simple docstring'''
from manim import *
class __SCREAMING_SNAKE_CASE ( lowerCamelCase_ ):
'''simple docstring'''
def _UpperCamelCase ( self ):
'''simple docstring'''
UpperCAmelCase_ : Dict = Rectangle(height=0.5 , width=0.5 )
UpperCAmelCase_ : Any = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
UpperCAmelCase_ : List[str] = Rectangle(height=0.25 , width=0.25 )
UpperCAmelCase_ : Any = [mem.copy() for i in range(6 )]
UpperCAmelCase_ : Union[str, Any] = [mem.copy() for i in range(6 )]
UpperCAmelCase_ : Optional[int] = VGroup(*snake_case_ ).arrange(snake_case_ , buff=0 )
UpperCAmelCase_ : Any = VGroup(*snake_case_ ).arrange(snake_case_ , buff=0 )
UpperCAmelCase_ : str = VGroup(snake_case_ , snake_case_ ).arrange(snake_case_ , buff=0 )
UpperCAmelCase_ : Any = Text('CPU' , font_size=2_4 )
UpperCAmelCase_ : Tuple = Group(snake_case_ , snake_case_ ).arrange(snake_case_ , buff=0.5 , aligned_edge=snake_case_ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(snake_case_ )
UpperCAmelCase_ : str = [mem.copy() for i in range(4 )]
UpperCAmelCase_ : Dict = VGroup(*snake_case_ ).arrange(snake_case_ , buff=0 )
UpperCAmelCase_ : List[str] = Text('GPU' , font_size=2_4 )
UpperCAmelCase_ : Optional[int] = Group(snake_case_ , snake_case_ ).arrange(snake_case_ , buff=0.5 , aligned_edge=snake_case_ )
gpu.move_to([-1, -1, 0] )
self.add(snake_case_ )
UpperCAmelCase_ : str = [mem.copy() for i in range(6 )]
UpperCAmelCase_ : Tuple = VGroup(*snake_case_ ).arrange(snake_case_ , buff=0 )
UpperCAmelCase_ : str = Text('Model' , font_size=2_4 )
UpperCAmelCase_ : Optional[int] = Group(snake_case_ , snake_case_ ).arrange(snake_case_ , buff=0.5 , aligned_edge=snake_case_ )
model.move_to([3, -1.0, 0] )
self.add(snake_case_ )
UpperCAmelCase_ : str = []
UpperCAmelCase_ : Optional[Any] = []
for i, rect in enumerate(snake_case_ ):
UpperCAmelCase_ : str = fill.copy().set_fill(snake_case_ , opacity=0.8 )
target.move_to(snake_case_ )
model_arr.append(snake_case_ )
UpperCAmelCase_ : int = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(snake_case_ , opacity=0.8 )
cpu_target.move_to(cpu_left_col_base[i] )
model_cpu_arr.append(snake_case_ )
self.add(*snake_case_ , *snake_case_ )
UpperCAmelCase_ : List[Any] = [meta_mem.copy() for i in range(6 )]
UpperCAmelCase_ : List[str] = [meta_mem.copy() for i in range(6 )]
UpperCAmelCase_ : Tuple = VGroup(*snake_case_ ).arrange(snake_case_ , buff=0 )
UpperCAmelCase_ : Dict = VGroup(*snake_case_ ).arrange(snake_case_ , buff=0 )
UpperCAmelCase_ : Optional[Any] = VGroup(snake_case_ , snake_case_ ).arrange(snake_case_ , buff=0 )
UpperCAmelCase_ : Tuple = Text('Disk' , font_size=2_4 )
UpperCAmelCase_ : Union[str, Any] = Group(snake_case_ , snake_case_ ).arrange(snake_case_ , buff=0.5 , aligned_edge=snake_case_ )
disk.move_to([-4, -1.25, 0] )
self.add(snake_case_ , snake_case_ )
UpperCAmelCase_ : List[Any] = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
UpperCAmelCase_ : Any = MarkupText(
F'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' , font_size=1_8 , )
key_text.move_to([-5, 2.4, 0] )
self.add(snake_case_ , snake_case_ )
UpperCAmelCase_ : Dict = MarkupText(
F'''<span fgcolor=\'{BLUE}\'>●</span> Checkpoint''' , font_size=1_8 , )
blue_text.next_to(snake_case_ , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(snake_case_ )
UpperCAmelCase_ : Optional[Any] = MarkupText(
F'''Now watch as an input is passed through the model\nand how the memory is utilized and handled.''' , font_size=2_4 , )
step_a.move_to([2, 2, 0] )
self.play(Write(snake_case_ ) )
UpperCAmelCase_ : Tuple = Square(0.3 )
input.set_fill(snake_case_ , opacity=1.0 )
input.set_stroke(width=0.0 )
input.next_to(model_base[0] , snake_case_ , buff=0.5 )
self.play(Write(snake_case_ ) )
input.generate_target()
input.target.next_to(model_arr[0] , direction=snake_case_ , buff=0.02 )
self.play(MoveToTarget(snake_case_ ) )
self.play(FadeOut(snake_case_ ) )
UpperCAmelCase_ : Any = Arrow(start=snake_case_ , end=snake_case_ , color=snake_case_ , buff=0.5 )
a.next_to(model_arr[0].get_left() , snake_case_ , buff=0.2 )
model_cpu_arr[0].generate_target()
model_cpu_arr[0].target.move_to(gpu_rect[0] )
UpperCAmelCase_ : List[str] = MarkupText(
F'''As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.''' , font_size=2_4 , )
step_a.move_to([2, 2, 0] )
self.play(Write(snake_case_ , run_time=3 ) )
UpperCAmelCase_ : List[Any] = {'run_time': 1, 'fade_in': True, 'fade_out': True, 'buff': 0.02}
self.play(
Write(snake_case_ ) , Circumscribe(model_arr[0] , color=snake_case_ , **snake_case_ ) , Circumscribe(model_cpu_arr[0] , color=snake_case_ , **snake_case_ ) , Circumscribe(gpu_rect[0] , color=snake_case_ , **snake_case_ ) , )
self.play(MoveToTarget(model_cpu_arr[0] ) )
UpperCAmelCase_ : Union[str, Any] = a.copy()
for i in range(6 ):
a_c.next_to(model_arr[i].get_right() + 0.02 , snake_case_ , buff=0.2 )
input.generate_target()
input.target.move_to(model_arr[i].get_right() + 0.02 )
UpperCAmelCase_ : Tuple = AnimationGroup(
FadeOut(snake_case_ , run_time=0.5 ) , MoveToTarget(snake_case_ , run_time=0.5 ) , FadeIn(snake_case_ , run_time=0.5 ) , lag_ratio=0.2 )
self.play(snake_case_ )
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[i] )
if i < 5:
model_cpu_arr[i + 1].generate_target()
model_cpu_arr[i + 1].target.move_to(gpu_rect[0] )
if i >= 1:
UpperCAmelCase_ : Any = 0.7
self.play(
Circumscribe(model_arr[i] , **snake_case_ ) , Circumscribe(cpu_left_col_base[i] , **snake_case_ ) , Circumscribe(cpu_left_col_base[i + 1] , color=snake_case_ , **snake_case_ ) , Circumscribe(gpu_rect[0] , color=snake_case_ , **snake_case_ ) , Circumscribe(model_arr[i + 1] , color=snake_case_ , **snake_case_ ) , )
if i < 1:
self.play(
MoveToTarget(model_cpu_arr[i] ) , MoveToTarget(model_cpu_arr[i + 1] ) , )
else:
self.play(
MoveToTarget(model_cpu_arr[i] , run_time=0.7 ) , MoveToTarget(model_cpu_arr[i + 1] , run_time=0.7 ) , )
else:
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] )
input.generate_target()
input.target.next_to(model_arr[-1].get_right() , RIGHT + 0.02 , buff=0.2 )
self.play(
Circumscribe(model_arr[-1] , color=snake_case_ , **snake_case_ ) , Circumscribe(cpu_left_col_base[-1] , color=snake_case_ , **snake_case_ ) , Circumscribe(gpu_rect[0] , color=snake_case_ , **snake_case_ ) , )
self.play(MoveToTarget(model_cpu_arr[i] ) )
UpperCAmelCase_ : Any = a_c
UpperCAmelCase_ : int = a_c.copy()
input.generate_target()
input.target.next_to(model_base[-1] , RIGHT + 0.02 , buff=0.5 )
self.play(
FadeOut(snake_case_ ) , FadeOut(snake_case_ , run_time=0.5 ) , )
UpperCAmelCase_ : Optional[Any] = MarkupText(F'''Inference on a model too large for GPU memory\nis successfully completed.''' , font_size=2_4 )
step_a.move_to([2, 2, 0] )
self.play(Write(snake_case_ , run_time=3 ) , MoveToTarget(snake_case_ ) )
self.wait()
| 274
|
'''simple docstring'''
from typing import List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case__ : List[Any] = logging.get_logger(__name__)
snake_case__ : Optional[Any] = {
'''huggingface/autoformer-tourism-monthly''': '''https://huggingface.co/huggingface/autoformer-tourism-monthly/resolve/main/config.json''',
}
class AutoformerConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = '''autoformer'''
    attribute_map = {
        '''hidden_size''': '''d_model''',
        '''num_attention_heads''': '''encoder_attention_heads''',
        '''num_hidden_layers''': '''encoder_layers''',
    }
    def __init__(
        self,
        prediction_length=None,
        context_length=None,
        distribution_output="student_t",
        loss="nll",
        input_size=1,
        lags_sequence=[1, 2, 3, 4, 5, 6, 7],
        scaling=True,
        num_time_features=0,
        num_dynamic_real_features=0,
        num_static_categorical_features=0,
        num_static_real_features=0,
        cardinality=None,
        embedding_dimension=None,
        d_model=6_4,
        encoder_attention_heads=2,
        decoder_attention_heads=2,
        encoder_layers=2,
        decoder_layers=2,
        encoder_ffn_dim=3_2,
        decoder_ffn_dim=3_2,
        activation_function="gelu",
        dropout=0.1,
        encoder_layerdrop=0.1,
        decoder_layerdrop=0.1,
        attention_dropout=0.1,
        activation_dropout=0.1,
        num_parallel_samples=1_0_0,
        init_std=0.02,
        use_cache=True,
        is_encoder_decoder=True,
        label_length=1_0,
        moving_average=2_5,
        autocorrelation_factor=3,
        **kwargs,
    ):
        '''simple docstring'''
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length if context_length is not None else prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        if cardinality is not None and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    'The cardinality should be a list of the same length as `num_static_categorical_features`')
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        if embedding_dimension is not None and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    'The embedding dimension should be a list of the same length as `num_static_categorical_features`')
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(5_0, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers

        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop

        self.activation_function = activation_function
        self.init_std = init_std

        self.use_cache = use_cache

        # Autoformer
        self.label_length = label_length
        self.moving_average = moving_average
        self.autocorrelation_factor = autocorrelation_factor

        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
@property
    def _number_of_features(self) -> int:
        '''simple docstring'''
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
| 274
| 1
|
"""simple docstring"""
import itertools
import random
import unittest
import numpy as np
from transformers import ASTFeatureExtractor
from transformers.testing_utils import require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
global_rng = random.Random()
if is_torch_available():
import torch
def floats_list(shape, scale=1.0, rng=None, name=None):
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
class ASTFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=1,
        padding_value=0.0,
        sampling_rate=16000,
        return_attention_mask=True,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
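        # e.g. with the defaults above: (2000 - 400) // (7 - 1) = 266, so each batch
        # element is roughly 266 samples longer than the previous one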
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
    def prepare_feat_extract_dict(self):
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length))
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size)))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]

        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]

        return speech_inputs
@require_torch
@require_torchaudio
class ASTFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = ASTFeatureExtractor

    def setUp(self):
        self.feat_extract_tester = ASTFeatureExtractionTester(self)

    def test_call(self):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_sequences_1 = feat_extract(speech_inputs[0], return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1E-3))

        # Test batched
        encoded_sequences_1 = feat_extract(speech_inputs, padding=True, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, padding=True, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1E-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1E-3))

    @require_torch
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_values.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_values.dtype == torch.float32)

    def _load_datasamples(self, num_samples):
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]

    @require_torch
    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
            [-0.9894, -1.2776, -0.9066, -1.2776, -0.9349, -1.2609, -1.0386, -1.2776,
             -1.1561, -1.2776, -1.2052, -1.2723, -1.2190, -1.2132, -1.2776, -1.1133,
             -1.1953, -1.1343, -1.1584, -1.2203, -1.1770, -1.2474, -1.2381, -1.1936,
             -0.9270, -0.8317, -0.8049, -0.7706, -0.7565, -0.7869]
        )
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = ASTFeatureExtractor()
        input_values = feature_extractor(input_speech, return_tensors="pt").input_values
        self.assertEqual(input_values.shape, (1, 1024, 128))
        self.assertTrue(torch.allclose(input_values[0, 0, :30], EXPECTED_INPUT_VALUES, atol=1E-4))
| 261
|
"""simple docstring"""
def solution(n: int = 1_0_0_0) -> int:
    # sums every natural number below n that is a multiple of 3 or of 5; the
    # original `elif a % 15 == 0` branch was unreachable (every multiple of 15
    # already matches `a % 3 == 0`), so it has been removed
    a = 3
    result = 0
    while a < n:
        if a % 3 == 0 or a % 5 == 0:
            result += a
        a += 1
    return result
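# cross-check with the closed form (Gauss sums), counting multiples below n:
#   S(k) = k * m * (m + 1) // 2  with  m = (n - 1) // k
#   solution(n) == S(3) + S(5) - S(15)   # subtract S(15) to undo double counting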
if __name__ == "__main__":
print(F'''{solution() = }''')
| 261
| 1
|
"""simple docstring"""
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
)
from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class LayoutLMvaModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        num_channels=3,
        image_size=4,
        patch_size=2,
        text_seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=36,
        num_hidden_layers=3,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=5_12,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        coordinate_size=6,
        shape_size=6,
        num_labels=3,
        num_choices=4,
        scope=None,
        range_bbox=10_00,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.text_seq_length = text_seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox

        # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
        self.text_seq_length = text_seq_length
        self.image_seq_length = (image_size // patch_size) ** 2 + 1
        self.seq_length = self.text_seq_length + self.image_seq_length
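        # worked example with the defaults above: image_seq_length = (4 // 2) ** 2 + 1 = 5
        # and seq_length = 7 + 5 = 12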
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.text_seq_length], self.vocab_size)

        bbox = ids_tensor([self.batch_size, self.text_seq_length, 4], self.range_bbox)
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t

        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.text_seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.text_seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.text_seq_length], self.num_labels)

        config = LayoutLMvaConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, coordinate_size=self.coordinate_size, shape_size=self.shape_size, input_size=self.image_size, patch_size=self.patch_size, )

        return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    def create_and_check_model(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LayoutLMvaModel(config=config)
        model.to(torch_device)
        model.eval()

        # text + image
        result = model(input_ids, pixel_values=pixel_values)
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

        # text only
        result = model(input_ids)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.text_seq_length, self.hidden_size))

        # image only
        result = model(pixel_values=pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.image_seq_length, self.hidden_size))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        config.num_labels = self.num_labels
        model = LayoutLMvaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels, )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        config.num_labels = self.num_labels
        model = LayoutLMvaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels, )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.text_seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LayoutLMvaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "pixel_values": pixel_values,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class LayoutLMvaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    test_pruning = False
    test_torchscript = False
    test_mismatched_shapes = False

    all_model_classes = (
        (
            LayoutLMvaModel,
            LayoutLMvaForSequenceClassification,
            LayoutLMvaForTokenClassification,
            LayoutLMvaForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"""document-question-answering""": LayoutLMvaForQuestionAnswering, """feature-extraction""": LayoutLMvaModel}
        if is_torch_available()
        else {}
    )
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name):
# `DocumentQuestionAnsweringPipeline` is expected to work with this model, but it combines the text and visual
# embedding along the sequence dimension (dim 1), which causes an error during post-processing as `p_mask` has
# the sequence dimension of the text embedding only.
# (see the line `embedding_output = torch.cat([embedding_output, visual_embeddings], dim=1)`)
return True
    def setUp(self):
        self.model_tester = LayoutLMvaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LayoutLMvaConfig, hidden_size=37)
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = copy.deepcopy(inputs_dict)
        if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
            inputs_dict = {
                k: v.unsqueeze(1).expand(-1, self.model_tester.num_choices, -1).contiguous()
                if isinstance(v, torch.Tensor) and v.ndim > 1
                else v
                for k, v in inputs_dict.items()
            }

        if return_labels:
            if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
                inputs_dict["labels"] = torch.ones(self.model_tester.batch_size, dtype=torch.long, device=torch_device)
            elif model_class in get_values(MODEL_FOR_QUESTION_ANSWERING_MAPPING):
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device)
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device)
            elif model_class in [
                *get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING),
            ]:
                inputs_dict["labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device)
            elif model_class in [
                *get_values(MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING),
            ]:
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length), dtype=torch.long, device=torch_device, )
        return inputs_dict
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LayoutLMvaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """simple docstring"""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
class LayoutLMvaModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return LayoutLMvaImageProcessor(apply_ocr=False) if is_vision_available() else None

    @slow
    def test_inference_no_head(self):
        model = LayoutLMvaModel.from_pretrained("microsoft/layoutlmv3-base").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="pt").pixel_values.to(torch_device)

        input_ids = torch.tensor([[1, 2]])
        bbox = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]]).unsqueeze(0)

        # forward pass
        outputs = model(
            input_ids=input_ids.to(torch_device), bbox=bbox.to(torch_device), pixel_values=pixel_values.to(torch_device), )

        # verify the logits
        expected_shape = torch.Size((1, 1_99, 7_68))
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-0.0_529, 0.3_618, 0.1_632], [-0.1_587, -0.1_667, -0.0_400], [-0.1_557, -0.1_671, -0.0_505]]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1E-4))
| 303
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_mctct': ['MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MCTCTConfig'],
'feature_extraction_mctct': ['MCTCTFeatureExtractor'],
'processing_mctct': ['MCTCTProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mctct"] = [
'MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST',
'MCTCTForCTC',
'MCTCTModel',
'MCTCTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
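    # With this pattern `import transformers.models.mctct` stays cheap: the module left in
    # sys.modules is a _LazyModule, and a submodule such as modeling_mctct (which needs
    # torch) is only imported when one of the names in _import_structure is accessed.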
| 303
| 1
|
"""simple docstring"""
import math
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase ) -> List[str]:
'''simple docstring'''
return math.pow(lowerCAmelCase__ , 2 ) - a
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase ) -> int:
'''simple docstring'''
return 2 * x
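# Newton's method refines a guess x via x_next = x - f(x) / f'(x); with
# f(x) = x**2 - a the iteration converges quadratically to sqrt(a).
# E.g. for a = 9, starting from 4: 4 -> 3.125 -> 3.0025 -> ~3.000001.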
def get_initial_point(a: float) -> float:
    """simple docstring"""
    start = 2.0
    while start <= a:
        start = math.pow(start, 2)
    return start


def square_root_iterative(a: float, max_iter: int = 9999, tolerance: float = 0.00000000000001) -> float:
    """simple docstring"""
    if a < 0:
        raise ValueError("math domain error")

    value = get_initial_point(a)
    for _ in range(max_iter):
        prev_value = value
        value = value - fx(value, a) / fx_derivative(value)
        if abs(prev_value - value) < tolerance:
            return value
    return value


if __name__ == "__main__":
    from doctest import testmod

    testmod()
| 136
|
import argparse
import os
import re
import tensorflow as tf
import torch
from transformers import BertConfig, BertModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def load_tfa_weights_in_bert(model, tf_checkpoint_path, config):
    tf_path = os.path.abspath(tf_checkpoint_path)
    logger.info(f"Converting TensorFlow checkpoint from {tf_path}")
    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_path)
    names = []
    arrays = []
    layer_depth = []
    for full_name, shape in init_vars:
        # logger.info(f"Loading TF weight {name} with shape {shape}")
        name = full_name.split("/")
        if full_name == "_CHECKPOINTABLE_OBJECT_GRAPH" or name[0] in ["global_step", "save_counter"]:
            logger.info(f"Skipping non-model layer {full_name}")
            continue
        if "optimizer" in full_name:
            logger.info(f"Skipping optimization layer {full_name}")
            continue
        if name[0] == "model":
            # ignore initial 'model'
            name = name[1:]
        # figure out how many levels deep the name is
        depth = 0
        for _name in name:
            if _name.startswith("layer_with_weights"):
                depth += 1
            else:
                break
        layer_depth.append(depth)
        # read data
        array = tf.train.load_variable(tf_path, full_name)
        names.append("/".join(name))
        arrays.append(array)
    logger.info(f"Read a total of {len(arrays):,} layers")

    # Sanity check
    if len(set(layer_depth)) != 1:
        raise ValueError(f"Found layer names with different depths (layer depth {list(set(layer_depth))})")
    layer_depth = list(set(layer_depth))[0]
    if layer_depth != 1:
        raise ValueError(
            "The model contains more than just the embedding/encoder layers. This script does not handle MLM/NSP"
            " heads."
        )
    # convert layers
    logger.info("Converting weights...")
    for full_name, array in zip(names, arrays):
        name = full_name.split("/")
        pointer = model
        trace = []
        for i, m_name in enumerate(name):
            if m_name == ".ATTRIBUTES":
                # variable names end with .ATTRIBUTES/VARIABLE_VALUE
                break
            if m_name.startswith("layer_with_weights"):
                layer_num = int(m_name.split("-")[-1])
                if layer_num <= 2:
                    # embedding layers
                    # layer_num 0: word_embeddings
                    # layer_num 1: position_embeddings
                    # layer_num 2: token_type_embeddings
                    continue
                elif layer_num == 3:
                    # embedding LayerNorm
                    trace.extend(["embeddings", "LayerNorm"])
                    pointer = getattr(pointer, "embeddings")
                    pointer = getattr(pointer, "LayerNorm")
                elif layer_num > 3 and layer_num < config.num_hidden_layers + 4:
                    # encoder layers
                    trace.extend(["encoder", "layer", str(layer_num - 4)])
                    pointer = getattr(pointer, "encoder")
                    pointer = getattr(pointer, "layer")
                    pointer = pointer[layer_num - 4]
                elif layer_num == config.num_hidden_layers + 4:
                    # pooler layer
                    trace.extend(["pooler", "dense"])
                    pointer = getattr(pointer, "pooler")
                    pointer = getattr(pointer, "dense")
            elif m_name == "embeddings":
                trace.append("embeddings")
                pointer = getattr(pointer, "embeddings")
                if layer_num == 0:
                    trace.append("word_embeddings")
                    pointer = getattr(pointer, "word_embeddings")
                elif layer_num == 1:
                    trace.append("position_embeddings")
                    pointer = getattr(pointer, "position_embeddings")
                elif layer_num == 2:
                    trace.append("token_type_embeddings")
                    pointer = getattr(pointer, "token_type_embeddings")
                else:
                    raise ValueError(f"Unknown embedding layer with name {full_name}")
                trace.append("weight")
                pointer = getattr(pointer, "weight")
            elif m_name == "_attention_layer":
                # self-attention layer
                trace.extend(["attention", "self"])
                pointer = getattr(pointer, "attention")
                pointer = getattr(pointer, "self")
            elif m_name == "_attention_layer_norm":
                # output attention norm
                trace.extend(["attention", "output", "LayerNorm"])
                pointer = getattr(pointer, "attention")
                pointer = getattr(pointer, "output")
                pointer = getattr(pointer, "LayerNorm")
            elif m_name == "_attention_output_dense":
                # output attention dense
                trace.extend(["attention", "output", "dense"])
                pointer = getattr(pointer, "attention")
                pointer = getattr(pointer, "output")
                pointer = getattr(pointer, "dense")
            elif m_name == "_output_dense":
                # output dense
                trace.extend(["output", "dense"])
                pointer = getattr(pointer, "output")
                pointer = getattr(pointer, "dense")
            elif m_name == "_output_layer_norm":
                # output layer norm
                trace.extend(["output", "LayerNorm"])
                pointer = getattr(pointer, "output")
                pointer = getattr(pointer, "LayerNorm")
            elif m_name == "_key_dense":
                # attention key
                trace.append("key")
                pointer = getattr(pointer, "key")
            elif m_name == "_query_dense":
                # attention query
                trace.append("query")
                pointer = getattr(pointer, "query")
            elif m_name == "_value_dense":
                # attention value
                trace.append("value")
                pointer = getattr(pointer, "value")
            elif m_name == "_intermediate_dense":
                # attention intermediate dense
                trace.extend(["intermediate", "dense"])
                pointer = getattr(pointer, "intermediate")
                pointer = getattr(pointer, "dense")
            # weights & biases
            elif m_name in ["bias", "beta"]:
                trace.append("bias")
                pointer = getattr(pointer, "bias")
            elif m_name in ["kernel", "gamma"]:
                trace.append("weight")
                pointer = getattr(pointer, "weight")
            else:
                logger.warning(f"Ignored {m_name}")
        # for certain layers reshape is necessary
        trace = ".".join(trace)
        if re.match(r"(\S+)\.attention\.self\.(key|value|query)\.(bias|weight)", trace) or re.match(
            r"(\S+)\.attention\.output\.dense\.weight", trace
        ):
            array = array.reshape(pointer.data.shape)
        if "kernel" in full_name:
            array = array.transpose()
        if pointer.shape == array.shape:
            pointer.data = torch.from_numpy(array)
        else:
            raise ValueError(
                f"Shape mismatch in layer {full_name}: Model expects shape {pointer.shape} but layer contains shape:"
                f" {array.shape}"
            )
        logger.info(f"Successfully set variable {full_name} to PyTorch layer {trace}")
    return model
def convert_tfa_checkpoint_to_pytorch(tf_checkpoint_path, config_path, pytorch_dump_path):
    # Instantiate model
    logger.info(f"Loading model based on config from {config_path}...")
    config = BertConfig.from_json_file(config_path)
    model = BertModel(config)

    # Load weights from checkpoint
    logger.info(f"Loading weights from checkpoint {tf_checkpoint_path}...")
    load_tfa_weights_in_bert(model, tf_checkpoint_path, config)

    # Save pytorch-model
    logger.info(f"Saving PyTorch model to {pytorch_dump_path}...")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--tf_checkpoint_path', type=str, required=True, help='Path to the TensorFlow 2.x checkpoint path.'
)
parser.add_argument(
'--bert_config_file',
type=str,
required=True,
help='The config json file corresponding to the BERT model. This specifies the model architecture.',
)
parser.add_argument(
'--pytorch_dump_path',
type=str,
required=True,
help='Path to the output PyTorch model (must include filename).',
)
    args = parser.parse_args()
convert_tfa_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
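    # Example invocation (the script filename and all paths below are illustrative):
    #   python convert_bert_original_tf2_checkpoint_to_pytorch.py \
    #       --tf_checkpoint_path ./tf2_checkpoint \
    #       --bert_config_file ./bert_config.json \
    #       --pytorch_dump_path ./pytorch_model.bin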
| 216
| 0
|
from collections import OrderedDict
from typing import List, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_A : int = logging.get_logger(__name__)
_A : Union[str, Any] = {
'google/efficientnet-b7': 'https://huggingface.co/google/efficientnet-b7/resolve/main/config.json',
}
class EfficientNetConfig(PretrainedConfig):
    model_type = "efficientnet"

    def __init__(
        self,
        num_channels: int = 3,
        image_size: int = 600,
        width_coefficient: float = 2.0,
        depth_coefficient: float = 3.1,
        depth_divisor: int = 8,
        kernel_sizes: List[int] = [3, 3, 5, 3, 5, 5, 3],
        in_channels: List[int] = [32, 16, 24, 40, 80, 112, 192],
        out_channels: List[int] = [16, 24, 40, 80, 112, 192, 320],
        depthwise_padding: List[int] = [],
        strides: List[int] = [1, 2, 2, 2, 1, 2, 1],
        num_block_repeats: List[int] = [1, 2, 2, 3, 3, 4, 1],
        expand_ratios: List[int] = [1, 6, 6, 6, 6, 6, 6],
        squeeze_expansion_ratio: float = 0.25,
        hidden_act: str = "swish",
        hidden_dim: int = 2560,
        pooling_type: str = "mean",
        initializer_range: float = 0.02,
        batch_norm_eps: float = 0.001,
        batch_norm_momentum: float = 0.99,
        dropout_rate: float = 0.5,
        drop_connect_rate: float = 0.2,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.dropout_rate = dropout_rate
        self.drop_connect_rate = drop_connect_rate
        self.num_hidden_layers = sum(num_block_repeats) * 4
class EfficientNetOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5
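# Quick sanity check (illustrative): with the default num_block_repeats of
# [1, 2, 2, 3, 3, 4, 1], the derived num_hidden_layers is sum(...) * 4 = 16 * 4 = 64.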
| 354
|
def count_set_bits(number: int) -> int:
    """simple docstring"""
    if not isinstance(number, int) or number < 0:
        raise ValueError("Input must be a non-negative integer")

    count = 0
    while number:
        # number & (number - 1) clears the lowest set bit, so this way we arrive at
        # the next set bit (next 1) instead of looping through each bit position;
        # the loop runs once per `1` rather than 32 times
        number &= number - 1
        count += 1
    return count
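# Example: 0b10110 (22) has three set bits, so count_set_bits(0b10110) returns 3.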
if __name__ == "__main__":
import doctest
doctest.testmod()
| 265
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_blip_2': [
'BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP',
'Blip2Config',
'Blip2QFormerConfig',
'Blip2VisionConfig',
],
'processing_blip_2': ['Blip2Processor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blip_2"] = [
'BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST',
'Blip2Model',
'Blip2QFormerModel',
'Blip2PreTrainedModel',
'Blip2ForConditionalGeneration',
'Blip2VisionModel',
]
if TYPE_CHECKING:
from .configuration_blip_a import (
BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlipaConfig,
BlipaQFormerConfig,
BlipaVisionConfig,
)
from .processing_blip_a import BlipaProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip_a import (
BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipaForConditionalGeneration,
BlipaModel,
BlipaPreTrainedModel,
BlipaQFormerModel,
BlipaVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 11
|
# HF Trainer benchmarking tool
#
# This tool can be used to run and compare multiple dimensions of the HF Trainers args.
#
# It then prints a report once in github format with all the information that needs to be shared
# with others and second time in a console-friendly format, so it's easier to use for tuning things up.
#
# The main idea is:
#
# ./trainer-benchmark.py --base-cmd '<cmd args that don't change>' \
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' \
# --target-metric-key train_samples_per_second
#
# The variations can be any command line argument that you want to compare and not just dtype as in
# the example.
#
# --variations allows you to compare variations in multiple dimensions.
#
# as the first dimension has 2 options and the second 3 in our example, this will run the trainer 6
# times adding one of:
#
# 1. --tf32 0 --fp16 0
# 2. --tf32 0 --fp16 1
# 3. --tf32 0 --bf16 1
# 4. --tf32 1 --fp16 0
# 5. --tf32 1 --fp16 1
# 6. --tf32 1 --bf16 1
#
# and print the results. This is just a cartesian product - and more than 2 dimensions can be used.
#
# If you want to rely on defaults, this:
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1'
# is identical to this:
# --variations '--tf32 0|--tf32 1' '|--fp16|--bf16'
#
# the leading empty variation in the 2nd dimension is a valid variation.
#
# So here we get the following 6 variations:
#
# 1. --tf32 0
# 2. --tf32 0 --fp16
# 3. --tf32 0 --bf16
# 4. --tf32 1
# 5. --tf32 1 --fp16
# 6. --tf32 1 --bf16
#
# In this particular case we don't know what the default tf32 setting is (as it's normally
# pytorch-version dependent). That's why it's best to do an explicit setting of each variation:
# `--tf32 0|--tf32 1`
#
# Here is a full example of a train:
#
# CUDA_VISIBLE_DEVICES=0 python ./scripts/benchmark/trainer-benchmark.py \
# --base-cmd \
# ' examples/pytorch/translation/run_translation.py --model_name_or_path t5-small \
# --output_dir output_dir --do_train --label_smoothing 0.1 --logging_strategy no \
# --save_strategy no --per_device_train_batch_size 32 --max_source_length 512 \
# --max_target_length 512 --num_train_epochs 1 --overwrite_output_dir \
# --source_lang en --target_lang ro --dataset_name wmt16 --dataset_config "ro-en" \
# --source_prefix "translate English to Romanian: " --warmup_steps 50 \
# --max_train_samples 20000 --dataloader_num_workers 2 ' \
# --target-metric-key train_samples_per_second --repeat-times 1 --variations \
# '|--fp16|--bf16' '--tf32 0|--tf32 1' --report-metric-keys train_loss \
# --repeat-times 1 --base-variation '--tf32 0'
#
# and here is a possible output:
#
#
# | Variation | Train | Diff | Train |
# | | samples | % | loss |
# | | per | | |
# | | second | | |
# |:----------------|----------:|-------:|--------:|
# | --tf32 0 | 285.11 | 0 | 2.51 |
# | --tf32 1 | 342.09 | 20 | 2.51 |
# | --fp16 --tf32 0 | 423.49 | 49 | 2.51 |
# | --fp16 --tf32 1 | 423.13 | 48 | 2.51 |
# | --bf16 --tf32 0 | 416.80 | 46 | 2.52 |
# | --bf16 --tf32 1 | 415.87 | 46 | 2.52 |
#
#
# So you can quickly compare the different outcomes.
#
# Typically running each experiment once is enough, but if the environment is unstable you can
# re-run each experiment multiple times, e.g. 3 times via --repeat-times 3, and the averaged
# results will be reported.
#
# By default it'll use the lowest result as the base line to use as 100% and then compare the rest to
# it as can be seen from the table above, but you can also specify which combination is the one to use as
# the baseline, e.g., to change to another entry use: --base-variation '--tf32 1 --fp16 0'
#
# --target-metric-key is there to tell the program which metrics to compare - the different metric keys are
# inside output_dir/all_results.json. e.g., to measure eval performance instead of train use:
# --target-metric-key eval_samples_per_second
# but of course you will need to adjust the --base-cmd value in the example to perform evaluation as
# well (as currently it doesn't)
#
import argparse
import datetime
import io
import itertools
import json
import math
import os
import platform
import re
import shlex
import subprocess
import sys
from pathlib import Path
from statistics import fmean
import pandas as pd
import torch
from tqdm import tqdm
import transformers
nan = float("nan")
class Tee:
    """
    A helper class to tee print's output into a file.
    Usage: sys.stdout = Tee(filename)
    """

    def __init__(self, filename):
        self.stdout = sys.stdout
        self.file = open(filename, "a")

    def __getattr__(self, attr):
        return getattr(self.stdout, attr)

    def write(self, msg):
        self.stdout.write(msg)
        # strip tqdm codes
        self.file.write(re.sub(r"^.*\r", "", msg, 0, re.M))
def get_original_command(max_width=80, full_python_path=False):
    """Return the original command line string, wrapped for `max_width` chars."""
    cmd = []

    # deal with critical env vars
    env_keys = ["CUDA_VISIBLE_DEVICES"]
    for key in env_keys:
        val = os.environ.get(key, None)
        if val is not None:
            cmd.append(f"{key}={val}")

    # python executable (not always needed if the script is executable)
    python = sys.executable if full_python_path else sys.executable.split("/")[-1]
    cmd.append(python)

    # now the normal args
    cmd += list(map(shlex.quote, sys.argv))

    # split up into up to MAX_WIDTH lines with shell multi-line escapes
    lines = []
    current_line = ""
    while len(cmd) > 0:
        current_line += f"{cmd.pop(0)} "
        if len(cmd) == 0 or len(current_line) + len(cmd[0]) + 1 > max_width - 1:
            lines.append(current_line)
            current_line = ""
    return "\\\n".join(lines)
def get_base_command(args, output_dir):
    # unwrap multi-line input
    args.base_cmd = re.sub(r"[\\\n]+", " ", args.base_cmd)

    # remove --output_dir if any and set our own
    args.base_cmd = re.sub(r"--output_dir\s+[^\s]+", "", args.base_cmd)
    args.base_cmd += f" --output_dir {output_dir}"

    # ensure we have --overwrite_output_dir
    args.base_cmd = re.sub(r"--overwrite_output_dir\s+", "", args.base_cmd)
    args.base_cmd += " --overwrite_output_dir"

    return [sys.executable] + shlex.split(args.base_cmd)
def process_run_single(id, cmd, variation, output_dir, target_metric_key, metric_keys, verbose):
    # Enable to debug everything but the run itself, to do it fast and see the progress.
    # This is useful for debugging the output formatting quickly - we can remove it later once
    # everybody is happy with the output
    if 0:
        import random
        from time import sleep

        sleep(0)
        return dict(
            {k: random.uniform(0, 100) for k in metric_keys},
            **{target_metric_key: random.choice([nan, 10.31, 100.2, 55.6666, 222.22222222])},
        )

    result = subprocess.run(cmd, capture_output=True, text=True)

    if verbose:
        print("STDOUT", result.stdout)
        print("STDERR", result.stderr)

    # save the streams
    prefix = variation.replace(" ", "-")
    with open(Path(output_dir) / f"log.{prefix}.stdout.txt", "w") as f:
        f.write(result.stdout)
    with open(Path(output_dir) / f"log.{prefix}.stderr.txt", "w") as f:
        f.write(result.stderr)

    if result.returncode != 0:
        if verbose:
            print("failed")
        return {target_metric_key: nan}

    with io.open(f"{output_dir}/all_results.json", "r", encoding="utf-8") as f:
        metrics = json.load(f)

    # filter out just the keys we want
    return {k: v for k, v in metrics.items() if k in metric_keys}
def process_run(
    id,
    cmd,
    variation_key,
    variation,
    longest_variation_len,
    target_metric_key,
    report_metric_keys,
    repeat_times,
    output_dir,
    verbose,
):
    results = []
    metrics = []
    preamble = f"{id}: {variation:<{longest_variation_len}}"
    outcome = f"{preamble}: "
    metric_keys = set(report_metric_keys + [target_metric_key])
    for i in tqdm(range(repeat_times), desc=preamble, leave=False):
        single_run_metrics = process_run_single(
            id, cmd, variation, output_dir, target_metric_key, metric_keys, verbose
        )
        result = single_run_metrics[target_metric_key]
        if not math.isnan(result):
            metrics.append(single_run_metrics)
            results.append(result)
            outcome += "✓"
        else:
            outcome += "✘"
    outcome = f"\33[2K\r{outcome}"
    if len(metrics) > 0:
        mean_metrics = {k: fmean([x[k] for x in metrics]) for k in metrics[0].keys()}
        mean_target = round(mean_metrics[target_metric_key], 2)
        results_str = f"{outcome} {mean_target}"
        if len(metrics) > 1:
            results_str += f" {tuple(round(x, 2) for x in results)}"
        print(results_str)
        mean_metrics[variation_key] = variation
        return mean_metrics
    else:
        print(outcome)
        return {variation_key: variation, target_metric_key: nan}
def get_versions():
    properties = torch.cuda.get_device_properties(torch.device("cuda"))
    return f"\nDatetime : {datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\n\nSoftware:\ntransformers: {transformers.__version__}\ntorch : {torch.__version__}\ncuda : {torch.version.cuda}\npython : {platform.python_version()}\n\nHardware:\n{torch.cuda.device_count()} GPUs : {properties.name}, {properties.total_memory/2**30:0.2f}GB\n"
def process_results(results, target_metric_key, report_metric_keys, base_variation, output_dir):
    df = pd.DataFrame(results)
    variation_key = "variation"
    diff_key = "diff_%"

    sentinel_value = nan
    if base_variation is not None and len(df[df[variation_key] == base_variation]):
        # this may still return nan
        sentinel_value = df.loc[df[variation_key] == base_variation][target_metric_key].item()
    if math.isnan(sentinel_value):
        # as a fallback, use the minimal value as the sentinel
        sentinel_value = df.loc[df[target_metric_key] != nan][target_metric_key].min()

    # create diff column if possible
    if not math.isnan(sentinel_value):
        df[diff_key] = df.apply(
            lambda r: round(100 * (r[target_metric_key] - sentinel_value) / sentinel_value)
            if not math.isnan(r[target_metric_key])
            else 0,
            axis="columns",
        )

    # re-order columns
    cols = [variation_key, target_metric_key, diff_key, *report_metric_keys]
    df = df.reindex(cols, axis="columns")  # reorder cols

    # capitalize
    df = df.rename(str.capitalize, axis="columns")

    # make the cols as narrow as possible
    df_github = df.rename(lambda c: c.replace("_", "<br>"), axis="columns")
    df_console = df.rename(lambda c: c.replace("_", "\n"), axis="columns")

    report = ["", "Copy between the cut-here-lines and paste as is to github or a forum"]
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results:", df_github.to_markdown(index=False, floatfmt=".2f")]
    report += ["```"]
    report += ["*** Setup:", get_versions()]
    report += ["*** The benchmark command line was:", get_original_command()]
    report += ["```"]
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results (console):", df_console.to_markdown(index=False, floatfmt=".2f")]

    print("\n\n".join(report))
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--base-cmd", default=None, type=str, required=True, help="Base cmd",
    )
    parser.add_argument(
        "--variations", default=None, type=str, nargs="+", required=True, help="Multi-dimensional variations, example: '|--fp16|--bf16' '|--tf32'",
    )
    parser.add_argument(
        "--base-variation", default=None, type=str, help="Baseline variation to compare to. if None the minimal target value will be used to compare against",
    )
    parser.add_argument(
        "--target-metric-key", default=None, type=str, required=True, help="Target metric key in output_dir/all_results.json, e.g., train_samples_per_second",
    )
    parser.add_argument(
        "--report-metric-keys", default="", type=str, help="Report metric keys - other metric keys from output_dir/all_results.json to report, e.g., train_loss. Use a single argument e.g., 'train_loss train_samples'",
    )
    parser.add_argument(
        "--repeat-times", default=1, type=int, help="How many times to re-run each variation - an average will be reported",
    )
    parser.add_argument(
        "--output_dir", default="output_benchmark", type=str, help="The output directory where all the benchmark reports will go to and additionally this directory will be used to override --output_dir in the script that is being benchmarked",
    )
    parser.add_argument(
        "--verbose", default=False, action="store_true", help="Whether to show the outputs of each run or just the benchmark progress",
    )
    args = parser.parse_args()

    output_dir = args.output_dir
    Path(output_dir).mkdir(exist_ok=True)
    base_cmd = get_base_command(args, output_dir)

    # split each dimension into its --foo variations
    dims = [list(map(str.strip, re.split(r"\|", x))) for x in args.variations]
    # build a cartesian product of dimensions and convert those back into cmd-line arg strings,
    # while stripping white space for inputs that were empty
    variations = list(map(str.strip, map(" ".join, itertools.product(*dims))))
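    # e.g. dims = [["--tf32 0", "--tf32 1"], ["", "--fp16", "--bf16"]] expands to the
    # 6 variations listed at the top of this file; the str.strip calls drop the extra
    # space left behind by the empty entry.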
    longest_variation_len = max(len(x) for x in variations)

    # split wanted keys
    report_metric_keys = args.report_metric_keys.split()

    # capture prints into a log file for convenience
    report_fn = f"benchmark-report-{datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S')}.txt"
    print(f"\nNote: each run's output is also logged under {output_dir}/log.*.std*.txt")
    print(f"and this script's output is also piped into {report_fn}")

    sys.stdout = Tee(report_fn)

    print(f"\n*** Running {len(variations)} benchmarks:")
    print(f"Base command: {' '.join(base_cmd)}")

    variation_key = "variation"
    results = []
    for id, variation in enumerate(tqdm(variations, desc="Total completion: ", leave=False)):
        cmd = base_cmd + variation.split()
        results.append(
            process_run(
                id + 1,
                cmd,
                variation_key,
                variation,
                longest_variation_len,
                args.target_metric_key,
                report_metric_keys,
                args.repeat_times,
                output_dir,
                args.verbose,
            )
        )

    process_results(results, args.target_metric_key, report_metric_keys, args.base_variation, output_dir)
if __name__ == "__main__":
main()
| 11
| 1
|
import json
import os
import tempfile
import datasets
from utils import generate_example_dataset, get_duration
SPEED_TEST_N_EXAMPLES = 50_000
SMALL_TEST = 5_000

RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))
@get_duration
def read(dataset, length):
    """simple docstring"""
    for i in range(length):
        _ = dataset[i]


@get_duration
def read_batch(dataset, length, batch_size):
    """simple docstring"""
    for i in range(0, len(dataset), batch_size):
        _ = dataset[i : i + batch_size]


@get_duration
def read_formatted(dataset, length, type):
    """simple docstring"""
    with dataset.formatted_as(type=type):
        for i in range(length):
            _ = dataset[i]


@get_duration
def read_formatted_batch(dataset, length, batch_size, type):
    """simple docstring"""
    with dataset.formatted_as(type=type):
        for i in range(0, length, batch_size):
            _ = dataset[i : i + batch_size]
def benchmark_iterating():
    """simple docstring"""
    times = {"num examples": SPEED_TEST_N_EXAMPLES}
    functions = [
        (read, {"length": SMALL_TEST}),
        (read, {"length": SPEED_TEST_N_EXAMPLES}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1000}),
        (read_formatted, {"type": "numpy", "length": SMALL_TEST}),
        (read_formatted, {"type": "pandas", "length": SMALL_TEST}),
        (read_formatted, {"type": "torch", "length": SMALL_TEST}),
        (read_formatted, {"type": "tensorflow", "length": SMALL_TEST}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1000}),
    ]
    functions_shuffled = [
        (read, {"length": SMALL_TEST}),
        (read, {"length": SPEED_TEST_N_EXAMPLES}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1000}),
        (read_formatted, {"type": "numpy", "length": SMALL_TEST}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1000}),
    ]
    with tempfile.TemporaryDirectory() as tmp_dir:
        print("generating dataset")
        features = datasets.Features(
            {"list": datasets.Sequence(datasets.Value("float32")), "numbers": datasets.Value("float32")}
        )
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, "dataset.arrow"),
            features,
            num_examples=SPEED_TEST_N_EXAMPLES,
            seq_shapes={"list": (100,)},
        )
        print("first set of iterations")
        for func, kwargs in functions:
            print(func.__name__, str(kwargs))
            times[func.__name__ + " " + " ".join(str(v) for v in kwargs.values())] = func(dataset, **kwargs)

        print("shuffling dataset")
        dataset = dataset.shuffle()
        print("Second set of iterations (after shuffling)")
        for func, kwargs in functions_shuffled:
            print("shuffled ", func.__name__, str(kwargs))
            times["shuffled " + func.__name__ + " " + " ".join(str(v) for v in kwargs.values())] = func(
                dataset, **kwargs
            )

    with open(RESULTS_FILE_PATH, "wb") as f:
        f.write(json.dumps(times).encode("utf-8"))
if __name__ == "__main__": # useful to run the profiler
benchmark_iterating()
| 368
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__snake_case = logging.get_logger(__name__)
__snake_case = {
'''RWKV/rwkv-4-169m-pile''': '''https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-430m-pile''': '''https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-1b5-pile''': '''https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-3b-pile''': '''https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-7b-pile''': '''https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-14b-pile''': '''https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json''',
'''RWKV/rwkv-raven-1b5''': '''https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json''',
'''RWKV/rwkv-raven-3b''': '''https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json''',
'''RWKV/rwkv-raven-7b''': '''https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json''',
'''RWKV/rwkv-raven-14b''': '''https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json''',
}
class __snake_case(PretrainedConfig):
    model_type = "rwkv"
    attribute_map = {"max_position_embeddings": "context_length"}

    def __init__(
        self,
        vocab_size=50277,
        context_length=1024,
        hidden_size=4096,
        num_hidden_layers=32,
        attention_hidden_size=None,
        intermediate_size=None,
        layer_norm_epsilon=1e-5,
        bos_token_id=0,
        eos_token_id=0,
        rescale_every=6,
        tie_word_embeddings=False,
        use_cache=True,
        **kwargs,
    ):
        """simple docstring"""
        self.vocab_size = vocab_size
        self.context_length = context_length
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.attention_hidden_size = attention_hidden_size if attention_hidden_size is not None else hidden_size
        self.intermediate_size = intermediate_size if intermediate_size is not None else 4 * hidden_size
        self.layer_norm_epsilon = layer_norm_epsilon
        self.rescale_every = rescale_every
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            tie_word_embeddings=tie_word_embeddings, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs
        )
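# Example: with hidden_size=2048 and the two None defaults left in place,
# attention_hidden_size resolves to 2048 and intermediate_size to 4 * 2048 = 8192.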
| 78
| 0
|
'''simple docstring'''
from __future__ import annotations
import math
def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float) -> int:
    """simple docstring"""
    if depth < 0:
        raise ValueError("Depth cannot be less than 0")
    if not scores:
        raise ValueError("Scores cannot be empty")
    if depth == height:
        return scores[node_index]
    return (
        max(
            minimax(depth + 1, node_index * 2, False, scores, height),
            minimax(depth + 1, node_index * 2 + 1, False, scores, height),
        )
        if is_max
        else min(
            minimax(depth + 1, node_index * 2, True, scores, height),
            minimax(depth + 1, node_index * 2 + 1, True, scores, height),
        )
    )
def main() -> None:
    """simple docstring"""
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores), 2)
    print(f"""Optimal value : {minimax(0, 0, True, scores, height)}""")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
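# With the sample scores the tree evaluates to 65: the depth-2 maxima are
# (90, 33, 65, 34423), the depth-1 minima are (33, 65), and the maximizing
# root takes max(33, 65) = 65.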
| 37
|
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
| 262
| 0
|
"""simple docstring"""
class TrieNode:
    """simple docstring"""

    def __init__(self) -> None:
        self.nodes: dict[str, TrieNode] = {}  # Mapping from char to TrieNode
        self.is_leaf = False

    def insert_many(self, words: list[str]) -> None:
        for word in words:
            self.insert(word)

    def insert(self, word: str) -> None:
        curr = self
        for char in word:
            if char not in curr.nodes:
                curr.nodes[char] = TrieNode()
            curr = curr.nodes[char]
        curr.is_leaf = True

    def find(self, word: str) -> bool:
        curr = self
        for char in word:
            if char not in curr.nodes:
                return False
            curr = curr.nodes[char]
        return curr.is_leaf

    def delete(self, word: str) -> None:
        def _delete(curr: TrieNode, word: str, index: int) -> bool:
            if index == len(word):
                # If word does not exist
                if not curr.is_leaf:
                    return False
                curr.is_leaf = False
                return len(curr.nodes) == 0
            char = word[index]
            char_node = curr.nodes.get(char)
            # If char not in current trie node
            if not char_node:
                return False
            # Flag to check if node can be deleted
            delete_curr = _delete(char_node, word, index + 1)
            if delete_curr:
                del curr.nodes[char]
                return len(curr.nodes) == 0
            return delete_curr

        _delete(self, word, 0)
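# insert() and find() run in O(len(word)) time; delete() walks the same path and
# then prunes nodes bottom-up while they have no children and mark no shorter word.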
def print_words(node: TrieNode, word: str) -> None:
    if node.is_leaf:
        print(word, end=" ")

    for key, value in node.nodes.items():
        print_words(value, word + key)


def test_trie() -> bool:
    words = "banana bananas bandana band apple all beast".split()
    root = TrieNode()
    root.insert_many(words)
    # print_words(root, "")
    assert all(root.find(word) for word in words)
    assert root.find("banana")
    assert not root.find("bandanas")
    assert not root.find("apps")
    assert root.find("apple")
    assert root.find("all")
    root.delete("all")
    assert not root.find("all")
    root.delete("banana")
    assert not root.find("banana")
    assert root.find("bananas")
    return True


def print_results(msg: str, passes: bool) -> None:
    print(str(msg), "works!" if passes else "doesn't work :(")


def pytests() -> None:
    assert test_trie()


def main() -> None:
    print_results("Testing trie functionality", test_trie())
if __name__ == "__main__":
main()
| 361
|
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class _UpperCAmelCase(metaclass=DummyObject):
    """simple docstring"""

    _backends = ["transformers", "torch", "note_seq"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["transformers", "torch", "note_seq"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["transformers", "torch", "note_seq"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["transformers", "torch", "note_seq"])
| 68
| 0
|
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "tokenizer_file": {
        "EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "gpt-neox-20b": 2048,
}
class _SCREAMING_SNAKE_CASE(PreTrainedTokenizerFast):
    """simple docstring"""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        **kwargs,
    ):
        """simple docstring"""
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """simple docstring"""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        """simple docstring"""
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
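# Conversation turns are encoded without special tokens and separated by eos_token_id;
# when the flattened history exceeds model_max_length (2048 for gpt-neox-20b per the
# table above), only the most recent tokens are kept.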
| 10
|
'''simple docstring'''
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401
deprecate(
"""stable diffusion controlnet""",
"""0.22.0""",
"""Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.""",
standard_warn=False,
stacklevel=3,
)
| 323
| 0
|
'''simple docstring'''
import math
class Graph:
    """simple docstring"""

    def __init__(self, n=0):  # a graph with Node 0,1,...,N-1
        """simple docstring"""
        self.n = n
        self.w = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # adjacency matrix for weight
        self.dp = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # dp[i][j] stores minimum distance from i to j

    def add_edge(self, u, v, w):
        """simple docstring"""
        self.dp[u][v] = w

    def floyd_warshall(self):
        """simple docstring"""
        for k in range(0, self.n):
            for i in range(0, self.n):
                for j in range(0, self.n):
                    self.dp[i][j] = min(self.dp[i][j], self.dp[i][k] + self.dp[k][j])

    def show_min(self, u, v):
        """simple docstring"""
        return self.dp[u][v]
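# The triple loop relaxes every pair (i, j) through each intermediate node k,
# giving all-pairs shortest paths in O(n^3) time and O(n^2) space.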
if __name__ == "__main__":
    graph = Graph(5)
graph.add_edge(0, 2, 9)
graph.add_edge(0, 4, 10)
graph.add_edge(1, 3, 5)
graph.add_edge(2, 3, 7)
graph.add_edge(3, 0, 10)
graph.add_edge(3, 1, 2)
graph.add_edge(3, 2, 1)
graph.add_edge(3, 4, 6)
graph.add_edge(4, 1, 3)
graph.add_edge(4, 2, 4)
graph.add_edge(4, 3, 9)
graph.floyd_warshall()
graph.show_min(1, 4)
graph.show_min(0, 3)
| 111
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'configuration_bridgetower': [
'BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'BridgeTowerConfig',
'BridgeTowerTextConfig',
'BridgeTowerVisionConfig',
],
'processing_bridgetower': ['BridgeTowerProcessor'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_bridgetower"] = ["BridgeTowerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bridgetower"] = [
'BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST',
'BridgeTowerForContrastiveLearning',
'BridgeTowerForImageAndTextRetrieval',
'BridgeTowerForMaskedLM',
'BridgeTowerModel',
'BridgeTowerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_bridgetower import (
BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP,
BridgeTowerConfig,
BridgeTowerTextConfig,
BridgeTowerVisionConfig,
)
from .processing_bridgetower import BridgeTowerProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_bridgetower import BridgeTowerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bridgetower import (
BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST,
BridgeTowerForContrastiveLearning,
BridgeTowerForImageAndTextRetrieval,
BridgeTowerForMaskedLM,
BridgeTowerModel,
BridgeTowerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 111
| 1
|
'''simple docstring'''
import random
import unittest
import numpy as np
import torch
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionUpscalePipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionUpscalePipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    '''simple docstring'''

    hub_checkpoint = "ssube/stable-diffusion-x4-upscaler-onnx"

    def get_dummy_inputs(self, seed=0):
        '''simple docstring'''
        image = floats_tensor((1, 3, 128, 128), rng=random.Random(seed))
        generator = torch.manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs
    def test_pipeline_default_ddpm(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        # started as 128, should now be 512
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223]
        )
        assert np.abs(image_slice - expected_slice).max() < 1e-1

    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6898892, 0.59240556, 0.52499527, 0.58866215, 0.52258235, 0.52572715, 0.62414473, 0.6174387, 0.6214964]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.7659278, 0.76437664, 0.75579107, 0.7691116, 0.77666986, 0.7727672, 0.7758664, 0.7812226, 0.76942515]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.77424496, 0.773601, 0.7645288, 0.7769598, 0.7772739, 0.7738688, 0.78187233, 0.77879584, 0.767043]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionUpscalePipelineIntegrationTests(unittest.TestCase):
'''simple docstring'''
    @property
    def gpu_provider(self):
        '''simple docstring'''
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        '''simple docstring'''
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options
    def test_inference_default_ddpm(self):
        '''simple docstring'''
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/img2img/sketch-mountains-input.jpg' )
        init_image = init_image.resize((128, 128))
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
            'ssube/stable-diffusion-x4-upscaler-onnx', provider=self.gpu_provider, sess_options=self.gpu_options, )
        pipe.set_progress_bar_config(disable=None)

        prompt = 'A fantasy landscape, trending on artstation'

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, image=init_image, guidance_scale=7.5, num_inference_steps=10, generator=generator, output_type='np', )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.4883, 0.4947, 0.4980, 0.4975, 0.4982, 0.4980, 0.5000, 0.5006, 0.4972])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    def test_inference_k_lms(self):
        '''simple docstring'''
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/img2img/sketch-mountains-input.jpg' )
        init_image = init_image.resize((128, 128))
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            'ssube/stable-diffusion-x4-upscaler-onnx', subfolder='scheduler' )
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
            'ssube/stable-diffusion-x4-upscaler-onnx', scheduler=lms_scheduler, provider=self.gpu_provider, sess_options=self.gpu_options, )
        pipe.set_progress_bar_config(disable=None)

        prompt = 'A fantasy landscape, trending on artstation'

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, image=init_image, guidance_scale=7.5, num_inference_steps=20, generator=generator, output_type='np', )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.50173753, 0.50223356, 0.502039, 0.50233036, 0.5023725, 0.5022601, 0.5018758, 0.50234085, 0.50241566]
        )
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
| 251
|
'''simple docstring'''
from __future__ import annotations
def prime_sieve(limit: int) -> list[int]:
    """simple docstring"""
    is_prime = [True] * limit
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True

    for i in range(3, int(limit**0.5 + 1), 2):
        index = i * 2
        while index < limit:
            is_prime[index] = False
            index = index + i

    primes = [2]

    for i in range(3, limit, 2):
        if is_prime[i]:
            primes.append(i)

    return primes
def solution(ceiling: int = 1_000_000) -> int:
    """simple docstring"""
    primes = prime_sieve(ceiling)
    length = 0
    largest = 0

    for i in range(len(primes)):
        for j in range(i + length, len(primes)):
            sol = sum(primes[i:j])
            if sol >= ceiling:
                break
            if sol in primes:
                length = j - i
                largest = sol

    return largest
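# Note: `sol in primes` is a linear scan; building a set of the primes first would
# make the membership test O(1). For a ceiling of one million the result is 997651,
# the sum of 543 consecutive primes.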
if __name__ == "__main__":
print(F"""{solution() = }""")
| 251
| 1
|
"""simple docstring"""
import sys
import turtle
def get_mid(p1: tuple[float, float], p2: tuple[float, float]) -> tuple[float, float]:
    return (p1[0] + p2[0]) / 2, (p1[1] + p2[1]) / 2


def triangle(
    vertex1: tuple[float, float],
    vertex2: tuple[float, float],
    vertex3: tuple[float, float],
    depth: int,
) -> None:
    my_pen.up()
    my_pen.goto(vertex1[0], vertex1[1])
    my_pen.down()
    my_pen.goto(vertex2[0], vertex2[1])
    my_pen.goto(vertex3[0], vertex3[1])
    my_pen.goto(vertex1[0], vertex1[1])

    if depth == 0:
        return

    triangle(vertex1, get_mid(vertex1, vertex2), get_mid(vertex1, vertex3), depth - 1)
    triangle(vertex2, get_mid(vertex1, vertex2), get_mid(vertex2, vertex3), depth - 1)
    triangle(vertex3, get_mid(vertex3, vertex2), get_mid(vertex1, vertex3), depth - 1)
if __name__ == "__main__":
if len(sys.argv) != 2:
raise ValueError(
'''Correct format for using this script: '''
'''python fractals.py <int:depth_for_fractal>'''
)
    my_pen = turtle.Turtle()
my_pen.ht()
my_pen.speed(5)
my_pen.pencolor('''red''')
A = [(-175, -125), (0, 175), (175, -125)] # vertices of triangle
triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
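
# Editor's addition (not part of the original script): each recursive call halves the side
# length via get_mid, so depth d produces 3**d smallest triangles. A quick headless check
# of the midpoint helper (values chosen for illustration):
assert get_mid((0.0, 0.0), (2.0, 4.0)) == (1.0, 2.0)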
| 188
|
"""simple docstring"""
def infix_2_postfix(infix):
    stack = []
    post_fix = []
    priority = {
        "^": 3,
        "*": 2,
        "/": 2,
        "%": 2,
        "+": 1,
        "-": 1,
    }  # Priority of each operator
    print_width = len(infix) if (len(infix) > 7) else 7

    # Print table header for output
    print(
        "Symbol".center(8),
        "Stack".center(print_width),
        "Postfix".center(print_width),
        sep=" | ",
    )
    print("-" * (print_width * 3 + 7))

    for x in infix:
        if x.isalpha() or x.isdigit():
            post_fix.append(x)  # if x is Alphabet / Digit, add it to Postfix
        elif x == "(":
            stack.append(x)  # if x is "(" push to Stack
        elif x == ")":  # if x is ")" pop stack until "(" is encountered
            while stack[-1] != "(":
                post_fix.append(stack.pop())  # Pop stack & add the content to Postfix
            stack.pop()
        else:
            if len(stack) == 0:
                stack.append(x)  # If stack is empty, push x to stack
            else:  # while priority of x is not > priority of element in the stack
                # stop at "(" so an operator is never looked up in the priority table
                # against a parenthesis (fixes a KeyError for inputs like "a*(b+c)")
                while len(stack) > 0 and stack[-1] != "(" and priority[x] <= priority[stack[-1]]:
                    post_fix.append(stack.pop())  # pop stack & add to Postfix
                stack.append(x)  # push x to stack

        print(
            x.center(8),
            ("".join(stack)).ljust(print_width),
            ("".join(post_fix)).ljust(print_width),
            sep=" | ",
        )  # Output in tabular format

    while len(stack) > 0:  # while stack is not empty
        post_fix.append(stack.pop())  # pop stack & add to Postfix
        print(
            " ".center(8),
            ("".join(stack)).ljust(print_width),
            ("".join(post_fix)).ljust(print_width),
            sep=" | ",
        )  # Output in tabular format

    return "".join(post_fix)  # return Postfix as str


def infix_2_prefix(infix):
    infix = list(infix[::-1])  # reverse the infix equation
    for i in range(len(infix)):
        if infix[i] == "(":
            infix[i] = ")"  # change "(" to ")"
        elif infix[i] == ")":
            infix[i] = "("  # change ")" to "("
    return (infix_2_postfix("".join(infix)))[
        ::-1
    ]  # call infix_2_postfix on Infix, return reverse of Postfix


if __name__ == "__main__":
    Infix = input("\nEnter an Infix Equation = ")  # Input an Infix equation
    Infix = "".join(Infix.split())  # Remove spaces from the input
    print("\n\t", Infix, "(Infix) -> ", infix_2_prefix(Infix), "(Prefix)")
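
# Editor's addition (illustrative, not from the original module): expected results for a
# small parenthesised expression; both calls print their conversion tables as a side effect.
def _check_conversions() -> None:
    assert infix_2_postfix("a*(b+c)") == "abc+*"
    assert infix_2_prefix("a*(b+c)") == "*a+bc"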
| 188
| 1
|
from sklearn.metrics import recall_score
import datasets
__UpperCamelCase : Optional[Any] = "\nRecall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation:\nRecall = TP / (TP + FN)\nWhere TP is the true positives and FN is the false negatives.\n"
__UpperCamelCase : Any = "\nArgs:\n- **predictions** (`list` of `int`): The predicted labels.\n- **references** (`list` of `int`): The ground truth labels.\n- **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None.\n- **pos_label** (`int`): The class label to use as the 'positive class' when calculating the recall. Defaults to `1`.\n- **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.\n - `'binary'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary.\n - `'micro'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives.\n - `'macro'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n - `'weighted'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall.\n - `'samples'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n- **sample_weight** (`list` of `float`): Sample weights Defaults to `None`.\n- **zero_division** (): Sets the value to return when there is a zero division. Defaults to .\n - `'warn'`: If there is a zero division, the return value is `0`, but warnings are also raised.\n - `0`: If there is a zero division, the return value is `0`.\n - `1`: If there is a zero division, the return value is `1`.\n\nReturns:\n- **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. 
Therefore, a higher recall is generally considered better.\n\nExamples:\n\n Example 1-A simple example with some errors\n >>> recall_metric = datasets.load_metric('recall')\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1])\n >>> print(results)\n {'recall': 0.6666666666666666}\n\n Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`.\n >>> recall_metric = datasets.load_metric('recall')\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0)\n >>> print(results)\n {'recall': 0.5}\n\n Example 3-The same example as Example 1, but with `sample_weight` included.\n >>> recall_metric = datasets.load_metric('recall')\n >>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8]\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight)\n >>> print(results)\n {'recall': 0.55}\n\n Example 4-A multiclass example, using different averages.\n >>> recall_metric = datasets.load_metric('recall')\n >>> predictions = [0, 2, 1, 0, 0, 1]\n >>> references = [0, 1, 2, 0, 1, 2]\n >>> results = recall_metric.compute(predictions=predictions, references=references, average='macro')\n >>> print(results)\n {'recall': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average='micro')\n >>> print(results)\n {'recall': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average='weighted')\n >>> print(results)\n {'recall': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average=None)\n >>> print(results)\n {'recall': array([1., 0., 0.])}\n"
__UpperCamelCase : Union[str, Any] = "\n@article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Recall(datasets.Metric):
    def _info(self):
        """simple docstring"""
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("int32")),
                    "references": datasets.Sequence(datasets.Value("int32")),
                }
                if self.config_name == "multilabel"
                else {
                    "predictions": datasets.Value("int32"),
                    "references": datasets.Value("int32"),
                }
            ),
            reference_urls=["https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html"],
        )

    def _compute(
        self,
        predictions,
        references,
        labels=None,
        pos_label=1,
        average="binary",
        sample_weight=None,
        zero_division="warn",
    ):
        """simple docstring"""
        score = recall_score(
            references,
            predictions,
            labels=labels,
            pos_label=pos_label,
            average=average,
            sample_weight=sample_weight,
            zero_division=zero_division,
        )
        return {"recall": float(score) if score.size == 1 else score}
| 228
|
from math import pi, sqrt
def gamma(num: float) -> float:
    if num <= 0:
        raise ValueError("math domain error")
    if num > 171.5:
        raise OverflowError("math range error")
    elif num - int(num) not in (0, 0.5):
        raise NotImplementedError("num must be an integer or a half-integer")
    elif num == 0.5:
        return sqrt(pi)
    else:
        return 1.0 if num == 1 else (num - 1) * gamma(num - 1)


def test_gamma() -> None:
    assert gamma(0.5) == sqrt(pi)
    assert gamma(1) == 1.0
    assert gamma(2) == 1.0


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    num = 1.0
    while num:
        num = float(input("Gamma of: "))
        print(f"gamma({num}) = {gamma(num)}")
        print("\nEnter 0 to exit...")
| 228
| 1
|
import gc
import inspect
import unittest
import torch
from parameterized import parameterized
from diffusers import PriorTransformer
from diffusers.utils import floats_tensor, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin
enable_full_determinism()
class PriorTransformerTests(ModelTesterMixin, unittest.TestCase):
    '''simple docstring'''
    model_class = PriorTransformer
    main_input_name = "hidden_states"
@property
def lowerCAmelCase_ ( self : int ):
SCREAMING_SNAKE_CASE_ = 4
SCREAMING_SNAKE_CASE_ = 8
SCREAMING_SNAKE_CASE_ = 7
SCREAMING_SNAKE_CASE_ = floats_tensor((batch_size, embedding_dim) ).to(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = floats_tensor((batch_size, embedding_dim) ).to(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = floats_tensor((batch_size, num_embeddings, embedding_dim) ).to(_lowerCAmelCase )
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
def lowerCAmelCase_ ( self : Optional[Any] , _lowerCAmelCase : Dict=0 ):
torch.manual_seed(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = 4
SCREAMING_SNAKE_CASE_ = 8
SCREAMING_SNAKE_CASE_ = 7
SCREAMING_SNAKE_CASE_ = torch.randn((batch_size, embedding_dim) ).to(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = torch.randn((batch_size, embedding_dim) ).to(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = torch.randn((batch_size, num_embeddings, embedding_dim) ).to(_lowerCAmelCase )
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
@property
def lowerCAmelCase_ ( self : Tuple ):
return (4, 8)
@property
def lowerCAmelCase_ ( self : int ):
return (4, 8)
def lowerCAmelCase_ ( self : List[str] ):
SCREAMING_SNAKE_CASE_ = {
'num_attention_heads': 2,
'attention_head_dim': 4,
'num_layers': 2,
'embedding_dim': 8,
'num_embeddings': 7,
'additional_embeddings': 4,
}
SCREAMING_SNAKE_CASE_ = self.dummy_input
return init_dict, inputs_dict
def lowerCAmelCase_ ( self : Optional[int] ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = PriorTransformer.from_pretrained(
'hf-internal-testing/prior-dummy' , output_loading_info=_lowerCAmelCase )
self.assertIsNotNone(_lowerCAmelCase )
self.assertEqual(len(loading_info['missing_keys'] ) , 0 )
model.to(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = model(**self.dummy_input )[0]
assert hidden_states is not None, "Make sure output is not None"
def lowerCAmelCase_ ( self : List[str] ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.prepare_init_args_and_inputs_for_common()
SCREAMING_SNAKE_CASE_ = self.model_class(**_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE_ = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE_ = ['hidden_states', 'timestep']
self.assertListEqual(arg_names[:2] , _lowerCAmelCase )
def lowerCAmelCase_ ( self : Any ):
SCREAMING_SNAKE_CASE_ = PriorTransformer.from_pretrained('hf-internal-testing/prior-dummy' )
SCREAMING_SNAKE_CASE_ = model.to(_lowerCAmelCase )
if hasattr(_lowerCAmelCase , 'set_default_attn_processor' ):
model.set_default_attn_processor()
SCREAMING_SNAKE_CASE_ = self.get_dummy_seed_input()
with torch.no_grad():
SCREAMING_SNAKE_CASE_ = model(**_lowerCAmelCase )[0]
SCREAMING_SNAKE_CASE_ = output[0, :5].flatten().cpu()
print(_lowerCAmelCase )
# Since the VAE Gaussian prior's generator is seeded on the appropriate device,
# the expected output slices are not the same for CPU and GPU.
SCREAMING_SNAKE_CASE_ = torch.tensor([-1.3436, -0.2870, 0.7538, 0.4368, -0.0239] )
self.assertTrue(torch_all_close(_lowerCAmelCase , _lowerCAmelCase , rtol=1E-2 ) )
@slow
class PriorTransformerIntegrationTests(unittest.TestCase):
'''simple docstring'''
def lowerCAmelCase_ ( self : int , _lowerCAmelCase : Any=1 , _lowerCAmelCase : Optional[int]=768 , _lowerCAmelCase : Tuple=77 , _lowerCAmelCase : Optional[Any]=0 ):
torch.manual_seed(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = batch_size
SCREAMING_SNAKE_CASE_ = embedding_dim
SCREAMING_SNAKE_CASE_ = num_embeddings
SCREAMING_SNAKE_CASE_ = torch.randn((batch_size, embedding_dim) ).to(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = torch.randn((batch_size, embedding_dim) ).to(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = torch.randn((batch_size, num_embeddings, embedding_dim) ).to(_lowerCAmelCase )
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
def lowerCAmelCase_ ( self : Optional[Any] ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@parameterized.expand(
[
# fmt: off
[13, [-0.5861, 0.1283, -0.0931, 0.0882, 0.4476, 0.1329, -0.0498, 0.0640]],
[37, [-0.4913, 0.0110, -0.0483, 0.0541, 0.4954, -0.0170, 0.0354, 0.1651]],
# fmt: on
] )
def lowerCAmelCase_ ( self : Union[str, Any] , _lowerCAmelCase : str , _lowerCAmelCase : Dict ):
SCREAMING_SNAKE_CASE_ = PriorTransformer.from_pretrained('kandinsky-community/kandinsky-2-1-prior' , subfolder='prior' )
model.to(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = self.get_dummy_seed_input(seed=_lowerCAmelCase )
with torch.no_grad():
SCREAMING_SNAKE_CASE_ = model(**_lowerCAmelCase )[0]
assert list(sample.shape ) == [1, 768]
SCREAMING_SNAKE_CASE_ = sample[0, :8].flatten().cpu()
print(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = torch.tensor(_lowerCAmelCase )
assert torch_all_close(_lowerCAmelCase , _lowerCAmelCase , atol=1E-3 )
| 353
|
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class LevitImageProcessingTester(unittest.TestCase):
    '''simple docstring'''
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"shortest_edge": 18}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "do_center_crop": self.do_center_crop,
            "size": self.size,
            "crop_size": self.crop_size,
        }
@require_torch
@require_vision
class LevitImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    '''simple docstring'''
    image_processing_class = LevitImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = LevitImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
| 210
| 0
|
import argparse
import os
from pathlib import Path
import fairseq
import torch
from packaging import version
from torch import nn
from transformers import (
BartConfig,
BartForConditionalGeneration,
BartForSequenceClassification,
BartModel,
BartTokenizer,
)
from transformers.utils import logging
FAIRSEQ_MODELS = ["bart.large", "bart.large.mnli", "bart.large.cnn", "bart_xsum/model.pt"]
extra_arch = {"bart.large": BartModel, "bart.large.mnli": BartForSequenceClassification}
if version.parse(fairseq.__version__) < version.parse("0.9.0"):
    raise Exception("requires fairseq >= 0.9.0")


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = " Hello world! cécé herlolip"

mnli_rename_keys = [
    ("model.classification_heads.mnli.dense.weight", "classification_head.dense.weight"),
    ("model.classification_heads.mnli.dense.bias", "classification_head.dense.bias"),
    ("model.classification_heads.mnli.out_proj.weight", "classification_head.out_proj.weight"),
    ("model.classification_heads.mnli.out_proj.bias", "classification_head.out_proj.bias"),
]
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "_float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def load_xsum_checkpoint(checkpoint_path):
    sd = torch.load(checkpoint_path, map_location="cpu")
    hub_interface = torch.hub.load("pytorch/fairseq", "bart.large.cnn").eval()
    hub_interface.model.load_state_dict(sd["model"])
    return hub_interface


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
@torch.no_grad()
def convert_bart_checkpoint(checkpoint_path, pytorch_dump_folder_path, hf_checkpoint_name=None):
    if not os.path.exists(checkpoint_path):
        bart = torch.hub.load("pytorch/fairseq", checkpoint_path).eval()
    else:
        bart = load_xsum_checkpoint(checkpoint_path)

    bart.model.upgrade_state_dict(bart.model.state_dict())
    if hf_checkpoint_name is None:
        hf_checkpoint_name = checkpoint_path.replace(".", "-")
    config = BartConfig.from_pretrained(hf_checkpoint_name)
    tokens = bart.encode(SAMPLE_TEXT).unsqueeze(0)
    tokens2 = BartTokenizer.from_pretrained(hf_checkpoint_name).encode(SAMPLE_TEXT, return_tensors="pt").unsqueeze(0)
    if not torch.eq(tokens, tokens2).all():
        raise ValueError(
            f"converted tokenizer and pretrained tokenizer returned different output: {tokens} != {tokens2}"
        )

    if checkpoint_path == "bart.large.mnli":
        state_dict = bart.state_dict()
        remove_ignore_keys_(state_dict)
        state_dict["model.shared.weight"] = state_dict["model.decoder.embed_tokens.weight"]
        for src, dest in mnli_rename_keys:
            rename_key(state_dict, src, dest)
        model = BartForSequenceClassification(config).eval()
        model.load_state_dict(state_dict)
        fairseq_output = bart.predict("mnli", tokens, return_logits=True)
        new_model_outputs = model(tokens)[0]  # logits
    else:  # no classification heads to worry about
        state_dict = bart.model.state_dict()
        remove_ignore_keys_(state_dict)
        state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
        fairseq_output = bart.extract_features(tokens)
        if hf_checkpoint_name == "facebook/bart-large":
            model = BartModel(config).eval()
            model.load_state_dict(state_dict)
            new_model_outputs = model(tokens).model[0]
        else:
            model = BartForConditionalGeneration(config).eval()  # an existing summarization ckpt
            model.model.load_state_dict(state_dict)
            if hasattr(model, "lm_head"):
                model.lm_head = make_linear_from_emb(model.model.shared)
            new_model_outputs = model.model(tokens)[0]

    # Check results
    if fairseq_output.shape != new_model_outputs.shape:
        raise ValueError(
            f"`fairseq_output` shape and `new_model_output` shape are different: {fairseq_output.shape=}, {new_model_outputs.shape}"
        )
    if (fairseq_output != new_model_outputs).any().item():
        raise ValueError("Some values in `fairseq_output` are different from `new_model_outputs`")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''fairseq_path''', type=str, help='''bart.large, bart.large.cnn or a path to a model.pt on local filesystem.'''
)
parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--hf_config''', default=None, type=str, help='''Which huggingface architecture to use: bart-large-xsum'''
)
    args = parser.parse_args()
convert_bart_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, hf_checkpoint_name=args.hf_config)
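# Editor's note (illustrative invocation; the script file name is assumed, the flags match
# the argparse setup above):
#   python convert_bart_original_pytorch_checkpoint_to_pytorch.py bart.large.cnn \
#       ./bart-large-cnn --hf_config facebook/bart-large-cnn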
| 119
|
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

model_classes = {
    "b0": efficientnet.EfficientNetB0,
    "b1": efficientnet.EfficientNetB1,
    "b2": efficientnet.EfficientNetB2,
    "b3": efficientnet.EfficientNetB3,
    "b4": efficientnet.EfficientNetB4,
    "b5": efficientnet.EfficientNetB5,
    "b6": efficientnet.EfficientNetB6,
    "b7": efficientnet.EfficientNetB7,
}

CONFIG_MAP = {
'''b0''': {
'''hidden_dim''': 1_280,
'''width_coef''': 1.0,
'''depth_coef''': 1.0,
'''image_size''': 224,
'''dropout_rate''': 0.2,
'''dw_padding''': [],
},
'''b1''': {
'''hidden_dim''': 1_280,
'''width_coef''': 1.0,
'''depth_coef''': 1.1,
'''image_size''': 240,
'''dropout_rate''': 0.2,
'''dw_padding''': [16],
},
'''b2''': {
'''hidden_dim''': 1_408,
'''width_coef''': 1.1,
'''depth_coef''': 1.2,
'''image_size''': 260,
'''dropout_rate''': 0.3,
'''dw_padding''': [5, 8, 16],
},
'''b3''': {
'''hidden_dim''': 1_536,
'''width_coef''': 1.2,
'''depth_coef''': 1.4,
'''image_size''': 300,
'''dropout_rate''': 0.3,
'''dw_padding''': [5, 18],
},
'''b4''': {
'''hidden_dim''': 1_792,
'''width_coef''': 1.4,
'''depth_coef''': 1.8,
'''image_size''': 380,
'''dropout_rate''': 0.4,
'''dw_padding''': [6],
},
'''b5''': {
'''hidden_dim''': 2_048,
'''width_coef''': 1.6,
'''depth_coef''': 2.2,
'''image_size''': 456,
'''dropout_rate''': 0.4,
'''dw_padding''': [13, 27],
},
'''b6''': {
'''hidden_dim''': 2_304,
'''width_coef''': 1.8,
'''depth_coef''': 2.6,
'''image_size''': 528,
'''dropout_rate''': 0.5,
'''dw_padding''': [31],
},
'''b7''': {
'''hidden_dim''': 2_560,
'''width_coef''': 2.0,
'''depth_coef''': 3.1,
'''image_size''': 600,
'''dropout_rate''': 0.5,
'''dw_padding''': [18],
},
}
def get_efficientnet_config(model_name):
    config = EfficientNetConfig()
    config.hidden_dim = CONFIG_MAP[model_name]["hidden_dim"]
    config.width_coefficient = CONFIG_MAP[model_name]["width_coef"]
    config.depth_coefficient = CONFIG_MAP[model_name]["depth_coef"]
    config.image_size = CONFIG_MAP[model_name]["image_size"]
    config.dropout_rate = CONFIG_MAP[model_name]["dropout_rate"]
    config.depthwise_padding = CONFIG_MAP[model_name]["dw_padding"]

    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    config.num_labels = 1000
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}

    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
def convert_image_processor(model_name):
    size = CONFIG_MAP[model_name]["image_size"]
    preprocessor = EfficientNetImageProcessor(
        size={"height": size, "width": size},
        image_mean=[0.485, 0.456, 0.406],
        image_std=[0.47853944, 0.4732864, 0.47434163],
        do_center_crop=False,
    )
    return preprocessor
def rename_keys(original_param_names):
    block_names = [v.split("_")[0].split("block")[1] for v in original_param_names if v.startswith("block")]
    block_names = sorted(set(block_names))
    num_blocks = len(block_names)
    block_name_mapping = {b: str(i) for b, i in zip(block_names, range(num_blocks))}

    rename_keys = []
rename_keys.append(('stem_conv/kernel:0', 'embeddings.convolution.weight') )
rename_keys.append(('stem_bn/gamma:0', 'embeddings.batchnorm.weight') )
rename_keys.append(('stem_bn/beta:0', 'embeddings.batchnorm.bias') )
rename_keys.append(('stem_bn/moving_mean:0', 'embeddings.batchnorm.running_mean') )
rename_keys.append(('stem_bn/moving_variance:0', 'embeddings.batchnorm.running_var') )
for b in block_names:
        hf_b = block_name_mapping[b]
rename_keys.append((F"""block{b}_expand_conv/kernel:0""", F"""encoder.blocks.{hf_b}.expansion.expand_conv.weight""") )
rename_keys.append((F"""block{b}_expand_bn/gamma:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.weight""") )
rename_keys.append((F"""block{b}_expand_bn/beta:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.bias""") )
rename_keys.append(
(F"""block{b}_expand_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.running_mean""") )
rename_keys.append(
(F"""block{b}_expand_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.running_var""") )
rename_keys.append(
(F"""block{b}_dwconv/depthwise_kernel:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight""") )
rename_keys.append((F"""block{b}_bn/gamma:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight""") )
rename_keys.append((F"""block{b}_bn/beta:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias""") )
rename_keys.append(
(F"""block{b}_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean""") )
rename_keys.append(
(F"""block{b}_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var""") )
rename_keys.append((F"""block{b}_se_reduce/kernel:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.reduce.weight""") )
rename_keys.append((F"""block{b}_se_reduce/bias:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.reduce.bias""") )
rename_keys.append((F"""block{b}_se_expand/kernel:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.expand.weight""") )
rename_keys.append((F"""block{b}_se_expand/bias:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.expand.bias""") )
rename_keys.append(
(F"""block{b}_project_conv/kernel:0""", F"""encoder.blocks.{hf_b}.projection.project_conv.weight""") )
rename_keys.append((F"""block{b}_project_bn/gamma:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.weight""") )
rename_keys.append((F"""block{b}_project_bn/beta:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.bias""") )
rename_keys.append(
(F"""block{b}_project_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.running_mean""") )
rename_keys.append(
(F"""block{b}_project_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.running_var""") )
rename_keys.append(('top_conv/kernel:0', 'encoder.top_conv.weight') )
rename_keys.append(('top_bn/gamma:0', 'encoder.top_bn.weight') )
rename_keys.append(('top_bn/beta:0', 'encoder.top_bn.bias') )
rename_keys.append(('top_bn/moving_mean:0', 'encoder.top_bn.running_mean') )
rename_keys.append(('top_bn/moving_variance:0', 'encoder.top_bn.running_var') )
    key_mapping = {}
    for item in rename_keys:
        if item[0] in original_param_names:
            key_mapping[item[0]] = "efficientnet." + item[1]

    key_mapping["predictions/kernel:0"] = "classifier.weight"
    key_mapping["predictions/bias:0"] = "classifier.bias"
    return key_mapping
def replace_params(hf_params, tf_params, key_mapping):
    for key, value in tf_params.items():
        if "normalization" in key:
            continue

        hf_key = key_mapping[key]
        if "_conv" in key and "kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(3, 2, 0, 1)
        elif "depthwise_kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(2, 3, 0, 1)
        elif "kernel" in key:
            new_hf_value = torch.from_numpy(np.transpose(value))
        else:
            new_hf_value = torch.from_numpy(value)

        # Replace HF parameters with original TF model parameters
        assert hf_params[hf_key].shape == new_hf_value.shape
        hf_params[hf_key].copy_(new_hf_value)
@torch.no_grad()
def convert_efficientnet_checkpoint(model_name, pytorch_dump_folder_path, save_model, push_to_hub):
    original_model = model_classes[model_name](
        include_top=True,
        weights="imagenet",
        input_tensor=None,
        input_shape=None,
        pooling=None,
        classes=1000,
        classifier_activation="softmax",
    )

    tf_params = original_model.trainable_variables
    tf_non_train_params = original_model.non_trainable_variables
    tf_params = {param.name: param.numpy() for param in tf_params}
    for param in tf_non_train_params:
        tf_params[param.name] = param.numpy()
    tf_param_names = list(tf_params.keys())

    # Load HuggingFace model
    config = get_efficientnet_config(model_name)
    hf_model = EfficientNetForImageClassification(config).eval()
    hf_params = hf_model.state_dict()

    # Create src-to-dst parameter name mapping dictionary
    print("Converting parameters...")
    key_mapping = rename_keys(tf_param_names)
    replace_params(hf_params, tf_params, key_mapping)

    # Initialize preprocessor and preprocess input image
    preprocessor = convert_image_processor(model_name)
    inputs = preprocessor(images=prepare_img(), return_tensors="pt")

    # HF model inference
    hf_model.eval()
    with torch.no_grad():
        outputs = hf_model(**inputs)
    hf_logits = outputs.logits.detach().numpy()

    # Original model inference
    original_model.trainable = False
    image_size = CONFIG_MAP[model_name]["image_size"]
    img = prepare_img().resize((image_size, image_size), resample=PIL.Image.NEAREST)
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    original_logits = original_model.predict(x)

    # Check whether original and HF model outputs match  -> np.allclose
    assert np.allclose(original_logits, hf_logits, atol=1e-3), "The predicted logits are not the same."
    print("Model outputs match!")

    if save_model:
        # Create folder to save model
        if not os.path.isdir(pytorch_dump_folder_path):
            os.mkdir(pytorch_dump_folder_path)
        # Save converted model and image processor
        hf_model.save_pretrained(pytorch_dump_folder_path)
        preprocessor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Push model and image processor to hub
        print(f"Pushing converted {model_name} to the hub...")
        model_name = f"efficientnet-{model_name}"
        preprocessor.push_to_hub(model_name)
        hf_model.push_to_hub(model_name)
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''b0''',
type=str,
help='''Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''hf_model''',
type=str,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument('''--save_model''', action='''store_true''', help='''Save model to local''')
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Push model and image processor to the hub''')
__UpperCAmelCase = parser.parse_args()
convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
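# Editor's note (illustrative invocation; the script file name is assumed, the flags match
# the argparse setup above):
#   python convert_efficientnet_to_pytorch.py --model_name b1 \
#       --pytorch_dump_folder_path hf_model --save_model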
| 119
| 1
|
"""simple docstring"""
import sys
def matrix_chain_order(array):
    n = len(array)
    matrix = [[0 for x in range(n)] for x in range(n)]
    sol = [[0 for x in range(n)] for x in range(n)]

    for chain_length in range(2, n):
        for a in range(1, n - chain_length + 1):
            b = a + chain_length - 1

            matrix[a][b] = sys.maxsize
            for c in range(a, b):
                cost = (
                    matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
                )
                if cost < matrix[a][b]:
                    matrix[a][b] = cost
                    sol[a][b] = c
    return matrix, sol


def print_optimal_solution(optimal_solution, i, j):
    if i == j:
        print("A" + str(i), end=" ")
    else:
        print("(", end=" ")
        print_optimal_solution(optimal_solution, i, optimal_solution[i][j])
        print_optimal_solution(optimal_solution, optimal_solution[i][j] + 1, j)
        print(")", end=" ")


def main():
    array = [30, 35, 15, 5, 10, 20, 25]
    n = len(array)
    # Size of matrix created from above array will be
    # 30*35 35*15 15*5 5*10 10*20 20*25
    matrix, optimal_solution = matrix_chain_order(array)

    print("No. of Operation required: " + str(matrix[1][n - 1]))
    print_optimal_solution(optimal_solution, 1, n - 1)
if __name__ == "__main__":
main()
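
# Editor's addition (not part of the original module): matrix_chain_order fills matrix[a][b]
# with min over c of matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b],
# the cheapest place to split the chain. For dimensions [10, 20, 30] (a 10x20 times a 20x30
# product) the only split costs 10 * 20 * 30 = 6000 scalar multiplications.
def _check_two_matrix_chain() -> None:
    matrix, _ = matrix_chain_order([10, 20, 30])
    assert matrix[1][2] == 6000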
| 172
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
UpperCAmelCase = logging.get_logger(__name__)
class MaskFormerSwinConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "maskformer-swin"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        '''simple docstring'''
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
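
# Editor's addition (illustrative, not part of the original configuration file): with the
# defaults above, hidden_size is derived as embed_dim * 2 ** (len(depths) - 1) = 96 * 8.
if __name__ == "__main__":
    config = MaskFormerSwinConfig()
    assert config.hidden_size == 768
    assert config.stage_names == ["stem", "stage1", "stage2", "stage3", "stage4"]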
| 172
| 1
|
'''simple docstring'''
import argparse
import dataclasses
import json
import logging
import os
import shutil
from typing import List, Optional
import datasets
from accelerate import Accelerator
from datasets import load_dataset
from finetuning import finetune
from tqdm.auto import tqdm
import transformers
from transformers import AutoConfig, set_seed
from transformers.trainer_utils import IntervalStrategy
logger = logging.getLogger(__name__)

MODEL_BIN_FILE = "pytorch_model.bin"
@dataclasses.dataclass
class STModelArguments:
    model_name_or_path: str = dataclasses.field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models."}
    )
    cache_dir: Optional[str] = dataclasses.field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co."},
    )


@dataclasses.dataclass
class STDataArguments:
    train_file: str = dataclasses.field(metadata={"help": "A csv or a json file containing the training data."})
    infer_file: str = dataclasses.field(metadata={"help": "A csv or a json file containing the data to predict on."})
    eval_file: Optional[str] = dataclasses.field(
        default=None, metadata={"help": "A csv or a json file containing the validation data."}
    )
    task_name: Optional[str] = dataclasses.field(
        default=None,
        metadata={"help": "The name of the task to train on."},
    )
    label_list: Optional[List[str]] = dataclasses.field(
        default=None, metadata={"help": "The list of labels for the task."}
    )


@dataclasses.dataclass
class STTrainingArguments:
    output_dir: str = dataclasses.field(
        metadata={"help": "The output directory where the model predictions and checkpoints will be written."}
    )
    eval_metric: Optional[str] = dataclasses.field(
        default="accuracy", metadata={"help": "The evaluation metric used for the task."}
    )
    evaluation_strategy: Optional[str] = dataclasses.field(
        default="no",
        metadata={
            "help": 'The evaluation strategy to adopt during training. Possible values are: ["no", "step", "epoch"]'
        },
    )
    early_stopping_patience: Optional[int] = dataclasses.field(
        default=10,
        metadata={"help": "Number of evaluation calls with no improvement after which training will be stopped."},
    )
    early_stopping_threshold: Optional[float] = dataclasses.field(
        default=0.0,
        metadata={
            "help": "How much the specified evaluation metric must improve to satisfy early stopping conditions."
        },
    )
    do_filter_by_confidence: Optional[bool] = dataclasses.field(
        default=False,
        metadata={"help": "Whether to filter the pseudo-labeled data based on the confidence score."},
    )
    do_filter_by_val_performance: Optional[bool] = dataclasses.field(
        default=False,
        metadata={"help": "Whether to filter the pseudo-labeled data based on the validation performance."},
    )
    finetune_on_labeled_data: Optional[bool] = dataclasses.field(
        default=False,
        metadata={"help": "Whether to fine-tune on labeled data after pseudo training."},
    )
    confidence_threshold: Optional[float] = dataclasses.field(
        default=0.0,
        metadata={"help": "Confidence threshold for pseudo-labeled data filtering."},
    )
    max_selftrain_iterations: Optional[int] = dataclasses.field(
        default=100,
        metadata={"help": "Number of evaluation calls with no improvement after which training will be stopped."},
    )
    seed: Optional[int] = dataclasses.field(
        default=None,
        metadata={"help": "Random seed for initialization."},
    )
def create_pseudo_labeled_data(args, infer_input, infer_output, eval_result, id2label, next_data_dir):
    '''simple docstring'''
    dataset = datasets.concatenate_datasets([infer_input, infer_output], axis=1)

    if args.do_filter_by_confidence:
        dataset = dataset.filter(lambda example: example["probability"] > args.confidence_threshold)

    if args.do_filter_by_val_performance:
        assert eval_result >= 0.0 and eval_result <= 1.0
        num_top_rows = int(eval_result * len(dataset))
        print(num_top_rows)
        dataset = dataset.sort("probability", reverse=True)
        dataset = dataset.select(range(num_top_rows))

    dataset = dataset.remove_columns(["label", "probability"])
    dataset = dataset.rename_column("prediction", "label")
    dataset = dataset.map(lambda example: {"label": id2label[example["label"]]})
    dataset = dataset.shuffle(seed=args.seed)

    pseudo_labeled_data_file = os.path.join(next_data_dir, f"train_pseudo.{args.data_file_extension}")
    if args.data_file_extension == "csv":
        dataset.to_csv(pseudo_labeled_data_file, index=False)
    else:
        dataset.to_json(pseudo_labeled_data_file)
def __magic_name__ ( __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, **__UpperCAmelCase ) -> List[Any]:
'''simple docstring'''
snake_case_ = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO, )
logger.info(accelerator.state )
# Setup logging, we only want one process per machine to log things on the
# screen. accelerator.is_local_main_process is only True for one process per
# machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR )
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
snake_case_ = STModelArguments(model_name_or_path=__UpperCAmelCase )
snake_case_ = STDataArguments(train_file=__UpperCAmelCase, infer_file=__UpperCAmelCase )
snake_case_ = STTrainingArguments(output_dir=__UpperCAmelCase )
snake_case_ = argparse.Namespace()
for arg_class in (model_args, data_args, training_args):
for key, value in vars(__UpperCAmelCase ).items():
setattr(__UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase )
for key, value in kwargs.items():
if hasattr(__UpperCAmelCase, __UpperCAmelCase ):
setattr(__UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase )
# Sanity checks
snake_case_ = {}
snake_case_ = None
# You need to provide the training data and the data to predict on
assert args.train_file is not None
assert args.infer_file is not None
snake_case_ = args.train_file
snake_case_ = args.infer_file
if args.evaluation_strategy != IntervalStrategy.NO.value:
assert args.eval_file is not None
snake_case_ = args.eval_file
for key in data_files:
snake_case_ = data_files[key].split('''.''' )[-1]
assert extension in ["csv", "json"], F"`{key}_file` should be a csv or a json file."
if args.data_file_extension is None:
snake_case_ = extension
else:
assert extension == args.data_file_extension, F"`{key}_file` should be a {args.data_file_extension} file`."
assert (
args.eval_metric in datasets.list_metrics()
), F"{args.eval_metric} not in the list of supported metrics {datasets.list_metrics()}."
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed )
logger.info('''Creating the initial data directory for self-training...''' )
snake_case_ = F"{args.output_dir}/self-train_iter-{{}}".format
snake_case_ = data_dir_format(0 )
if accelerator.is_main_process:
if args.output_dir is not None:
os.makedirs(args.output_dir, exist_ok=__UpperCAmelCase )
os.makedirs(__UpperCAmelCase, exist_ok=__UpperCAmelCase )
accelerator.wait_for_everyone()
snake_case_ = None
snake_case_ = None
snake_case_ = 0
snake_case_ = False
# Show the progress bar
snake_case_ = tqdm(range(args.max_selftrain_iterations ), disable=not accelerator.is_local_main_process )
# Self-train
for iteration in range(0, int(args.max_selftrain_iterations ) ):
snake_case_ = data_dir_format(__UpperCAmelCase )
assert os.path.exists(__UpperCAmelCase )
# Stage 1: initial fine-tuning for iteration = 0 or pseudo-training for
# iteration > 0
snake_case_ = os.path.join(__UpperCAmelCase, '''stage-1''' )
snake_case_ = {
'''accelerator''': accelerator,
'''model_name_or_path''': args.model_name_or_path,
'''cache_dir''': args.cache_dir,
'''do_train''': True,
'''train_file''': data_files['''train'''] if iteration == 0 else data_files['''train_pseudo'''],
'''do_eval''': True if args.eval_file is not None else False,
'''eval_file''': data_files['''eval'''],
'''do_predict''': True,
'''infer_file''': data_files['''infer'''],
'''task_name''': args.task_name,
'''label_list''': args.label_list,
'''output_dir''': current_output_dir,
'''eval_metric''': args.eval_metric,
'''evaluation_strategy''': args.evaluation_strategy,
'''early_stopping_patience''': args.early_stopping_patience,
'''early_stopping_threshold''': args.early_stopping_threshold,
'''seed''': args.seed,
}
# Add additional training arguments
for key, value in kwargs.items():
if key not in arguments_dict and not hasattr(__UpperCAmelCase, __UpperCAmelCase ):
arguments_dict.update({key: value} )
snake_case_ = os.path.join(__UpperCAmelCase, '''best-checkpoint''', __UpperCAmelCase )
if os.path.exists(__UpperCAmelCase ):
logger.info(
'''Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 1.''', __UpperCAmelCase, __UpperCAmelCase, )
else:
logger.info('''***** Running self-training: iteration: %d, stage: 1 *****''', __UpperCAmelCase )
finetune(**__UpperCAmelCase )
accelerator.wait_for_everyone()
assert os.path.exists(__UpperCAmelCase )
logger.info('''Self-training job completed: iteration: %d, stage: 1.''', __UpperCAmelCase )
if iteration > 0 and args.finetune_on_labeled_data:
# Stage 2 (optional): fine-tuning on the original labeled data
snake_case_ = os.path.join(__UpperCAmelCase, '''best-checkpoint''' )
snake_case_ = os.path.join(__UpperCAmelCase, '''stage-2''' )
# Update arguments_dict
snake_case_ = model_path
snake_case_ = data_files['''train''']
snake_case_ = current_output_dir
snake_case_ = os.path.join(__UpperCAmelCase, '''best-checkpoint''', __UpperCAmelCase )
if os.path.exists(__UpperCAmelCase ):
logger.info(
'''Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 2.''', __UpperCAmelCase, __UpperCAmelCase, )
else:
logger.info('''***** Running self-training: iteration: %d, stage: 2 *****''', __UpperCAmelCase )
finetune(**__UpperCAmelCase )
accelerator.wait_for_everyone()
assert os.path.exists(__UpperCAmelCase )
logger.info('''Self-training job completed: iteration: %d, stage: 2.''', __UpperCAmelCase )
snake_case_ = iteration
snake_case_ = data_dir_format(iteration + 1 )
snake_case_ = AutoConfig.from_pretrained(os.path.join(__UpperCAmelCase, '''best-checkpoint''' ) )
snake_case_ = config.idalabel
snake_case_ = os.path.join(__UpperCAmelCase, '''eval_results_best-checkpoint.json''' )
snake_case_ = os.path.join(__UpperCAmelCase, '''test_results_best-checkpoint.json''' )
assert os.path.exists(__UpperCAmelCase )
with open(__UpperCAmelCase, '''r''' ) as f:
snake_case_ = float(json.load(__UpperCAmelCase )[args.eval_metric] )
snake_case_ = os.path.join(__UpperCAmelCase, '''infer_output_best-checkpoint.csv''' )
assert os.path.exists(__UpperCAmelCase )
# Loading the dataset from local csv or json files.
snake_case_ = load_dataset(args.data_file_extension, data_files={'''data''': data_files['''infer''']} )['''data''']
snake_case_ = load_dataset('''csv''', data_files={'''data''': infer_output_file} )['''data''']
if accelerator.is_main_process:
os.makedirs(__UpperCAmelCase, exist_ok=__UpperCAmelCase )
shutil.copy(__UpperCAmelCase, os.path.join(__UpperCAmelCase, F"eval_results_iter-{iteration}.json" ) )
if os.path.exists(__UpperCAmelCase ):
shutil.copy(__UpperCAmelCase, os.path.join(__UpperCAmelCase, F"test_results_iter-{iteration}.json" ) )
create_pseudo_labeled_data(__UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase )
accelerator.wait_for_everyone()
snake_case_ = os.path.join(__UpperCAmelCase, F"train_pseudo.{args.data_file_extension}" )
if args.evaluation_strategy != IntervalStrategy.NO.value:
snake_case_ = eval_result
if best_iteration is None:
snake_case_ = new_iteration
snake_case_ = new_eval_result
else:
if new_eval_result - best_eval_result > args.early_stopping_threshold:
snake_case_ = new_iteration
snake_case_ = new_eval_result
snake_case_ = 0
else:
if new_eval_result == best_eval_result:
snake_case_ = new_iteration
snake_case_ = new_eval_result
early_stopping_patience_counter += 1
if early_stopping_patience_counter >= args.early_stopping_patience:
snake_case_ = True
progress_bar.update(1 )
if should_training_stop:
break
if best_iteration is not None:
# Save the best iteration
logger.info('''Best iteration: %d''', __UpperCAmelCase )
logger.info('''Best evaluation result: %s = %f''', args.eval_metric, __UpperCAmelCase )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
shutil.copy(
os.path.join(__UpperCAmelCase, F"eval_results_iter-{iteration}.json" ), os.path.join(__UpperCAmelCase, '''eval_results_best-iteration.json''' ), )
else:
# Assume that the last iteration is the best
logger.info('''Best iteration: %d''', args.max_selftrain_iterations - 1 )
logger.info('''Best evaluation result: %s = %f''', args.eval_metric, __UpperCAmelCase )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
shutil.copy(
os.path.join(__UpperCAmelCase, F"eval_results_iter-{args.max_selftrain_iterations - 1}.json" ), os.path.join(__UpperCAmelCase, '''eval_results_best-iteration.json''' ), )
| 56
|
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
logger = logging.get_logger(__name__)

MARIAN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'Helsinki-NLP/opus-mt-en-de': 'https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json',
# See all Marian models at https://huggingface.co/models?filter=marian
}
class MarianConfig(PretrainedConfig):
    model_type = "marian"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=58101,
        decoder_vocab_size=None,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        activation_dropout=0.0,
        attention_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=58100,
        scale_embedding=False,
        pad_token_id=58100,
        eos_token_id=0,
        forced_eos_token_id=0,
        share_encoder_decoder_embeddings=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.decoder_vocab_size = decoder_vocab_size or vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.share_encoder_decoder_embeddings = share_encoder_decoder_embeddings
        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )
class MarianOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                common_inputs["decoder_input_ids"] = {0: "batch"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
            else:
                common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}
            if self.use_past:
                self.fill_with_past_key_values_(common_inputs, direction="inputs")
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_inputs[f"past_key_values.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_inputs[f"past_key_values.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        else:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                    ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
                    ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
                ]
            )
        return common_inputs

    @property
    # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast, self).outputs
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_outputs[f"present.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_outputs[f"present.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        return common_outputs
    def _generate_dummy_inputs_for_default_and_seq2seq_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        encoder_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer, batch_size, decoder_seq_length, is_pair, framework
        )
        decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, encoder_seq_length = common_inputs["input_ids"].shape
            decoder_seq_length = common_inputs["decoder_input_ids"].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )

            common_inputs["decoder_attention_mask"] = torch.cat(
                [common_inputs["decoder_attention_mask"], torch.ones(batch, decoder_past_length)], dim=1
            )

            common_inputs["past_key_values"] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers, num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers, num_decoder_layers)
            max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
            remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"

            for _ in range(min_num_layers):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape),
                        torch.zeros(decoder_shape),
                        torch.zeros(encoder_shape),
                        torch.zeros(encoder_shape),
                    )
                )
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == "encoder" else decoder_shape
            for _ in range(min_num_layers, max_num_layers):
                common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))
        return common_inputs
    def _generate_dummy_inputs_for_causal_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, seqlen = common_inputs["input_ids"].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers, _ = self.num_layers
            num_encoder_attention_heads, _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )

            mask_dtype = common_inputs["attention_mask"].dtype
            common_inputs["attention_mask"] = torch.cat(
                [common_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
            common_inputs["past_key_values"] = [
                (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)
            ]
        return common_inputs
    def _generate_dummy_inputs_for_encoder_and_decoder(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        # Copied from OnnxConfig.generate_dummy_inputs
        # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )

        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )

        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [" ".join([tokenizer.unk_token]) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
        return common_inputs
    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        else:
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        return common_inputs

    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
        else:
            flattened_output = super(OnnxSeq2SeqConfigWithPast, self)._flatten_past_key_values_(
                flattened_output, name, idx, t
            )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
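# --- Illustrative sketch (added by the editor; not part of the original file) ---
# How the config above is typically consumed when exporting Marian to ONNX.
# The checkpoint name is an assumption for demonstration and requires a download:
#
#   from transformers import MarianMTModel, MarianTokenizer
#   tokenizer = MarianTokenizer.from_pretrained("Helsinki-NLP/opus-mt-en-de")
#   model = MarianMTModel.from_pretrained("Helsinki-NLP/opus-mt-en-de")
#   onnx_config = MarianOnnxConfig(model.config, task="seq2seq-lm")
#   dummy_inputs = onnx_config.generate_dummy_inputs(tokenizer, framework=TensorType.PYTORCH)
#   # dummy_inputs now holds input_ids / attention_mask / decoder_* tensors with the
#   # fixed fallback batch (2) and sequence (8) sizes used to avoid ONNX shape
#   # specialization on the dynamic axes declared in `inputs`.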
"""simple docstring"""
import math
def _A ( lowercase , lowercase ):
"""simple docstring"""
return math.pow(lowercase_ , 2 ) - a
def _A ( lowercase ):
"""simple docstring"""
return 2 * x
def _A ( lowercase ):
"""simple docstring"""
a =2.0
while start <= a:
a =math.pow(lowercase_ , 2 )
return start
def _A ( lowercase , lowercase = 99_99 , lowercase = 0.00000000000001 ):
"""simple docstring"""
if a < 0:
raise ValueError('''math domain error''' )
a =get_initial_point(lowercase_ )
for _ in range(lowercase_ ):
a =value
a =value - fx(lowercase_ , lowercase_ ) / fx_derivative(lowercase_ )
if abs(prev_value - value ) < tolerance:
return value
return value
if __name__ == "__main__":
from doctest import testmod
testmod()
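# --- Illustrative usage (added by the editor; not part of the original file) ---
# Each Newton step solves f(x) = x**2 - a = 0 via x_next = x - f(x) / f'(x),
# so the iterate converges quadratically to sqrt(a):
if __name__ == "__main__":
    for a in (2, 5, 10_000):
        assert abs(square_root_iterative(a) - math.sqrt(a)) < 1e-9
    print("Newton square roots match math.sqrt")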
"""simple docstring"""
def _A ( lowercase = 2_00_00_00 ):
"""simple docstring"""
a =[0 for i in range(n + 1 )]
a =1
a =1
for i in range(2 , int(n**0.5 ) + 1 ):
if primality_list[i] == 0:
for j in range(i * i , n + 1 , lowercase ):
a =1
a =0
for i in range(lowercase ):
if primality_list[i] == 0:
sum_of_primes += i
return sum_of_primes
if __name__ == "__main__":
print(F'{solution() = }')
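# --- Illustrative check (added by the editor; not part of the original file) ---
# On a small bound the sieve is easy to verify by hand: the primes below 10
# are 2, 3, 5 and 7, which sum to 17.
if __name__ == "__main__":
    assert solution(10) == 17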
import math


def perfect_square(num: int) -> bool:
    """Check if a number is a perfect square using math.sqrt."""
    return math.sqrt(num) * math.sqrt(num) == num


def perfect_square_binary_search(n: int) -> bool:
    """Check if a number is a perfect square using binary search on integers."""
    left = 0
    right = n
    while left <= right:
        mid = (left + right) // 2
        if mid**2 == n:
            return True
        elif mid**2 > n:
            right = mid - 1
        else:
            left = mid + 1
    return False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
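# --- Illustrative usage (added by the editor; not part of the original file) ---
# The binary-search variant stays exact for large integers, where floating-point
# sqrt can lose precision:
if __name__ == "__main__":
    big = (10**8 + 1) ** 2
    assert perfect_square_binary_search(big) is True
    assert perfect_square_binary_search(big + 1) is False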
import argparse

CUSTOM_JS_FILE = "docs/source/_static/js/custom.js"


def update_custom_js(version: str) -> None:
    """Update the stable version and the version mapping in the docs' custom.js."""
    with open(CUSTOM_JS_FILE, encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    index = 0

    # First let's put the right version
    while not lines[index].startswith("const stableVersion ="):
        index += 1
    lines[index] = f'const stableVersion = "v{version}"\n'

    # Then update the dictionary
    while not lines[index].startswith("const versionMapping = {"):
        index += 1

    # We go until the end
    while not lines[index].startswith("}"):
        index += 1

    # We add the new version at the end
    lines[index - 1] += f'    "v{version}": "v{version}",\n'

    with open(CUSTOM_JS_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--version", help="Release version.")
    args = parser.parse_args()
    update_custom_js(args.version)
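# --- Illustrative note (added by the editor; not part of the original file) ---
# The string searches above assume custom.js contains a block shaped roughly
# like the following (contents are an assumption for demonstration):
#
#   const stableVersion = "v4.27.0"
#   const versionMapping = {
#       "": "v4.27.0 (stable)",
#       "v4.26.0": "v4.26.0",
#   }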
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
# Register SEW's fairseq modules
from sew_asapp import tasks # noqa: F401
from transformers import (
    SEWConfig,
    SEWForCTC,
    SEWModel,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
    logging,
)

logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
'post_extract_proj': 'feature_projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.upsample.0': 'encoder.upsample.projection',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'layer_norm',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
}
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "sew." + mapped_key if (is_finetuned and mapped_key != "lm_head") else mapped_key

                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "bias" in name:
                        weight_type = "bias"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
def convert_config(model, is_finetuned):
    config = SEWConfig()
    if is_finetuned:
        fs_config = model.w2v_encoder.w2v_model.cfg
    else:
        fs_config = model.cfg

    config.conv_bias = fs_config.conv_bias
    conv_layers = eval(fs_config.conv_feature_layers)
    config.conv_dim = [x[0] for x in conv_layers]
    config.conv_kernel = [x[1] for x in conv_layers]
    config.conv_stride = [x[2] for x in conv_layers]
    config.feat_extract_activation = "gelu"
    config.feat_extract_norm = "layer" if fs_config.extractor_mode == "layer_norm" else "group"
    config.final_dropout = 0.0
    config.hidden_act = fs_config.activation_fn.name
    config.hidden_size = fs_config.encoder_embed_dim
    config.initializer_range = 0.02
    config.intermediate_size = fs_config.encoder_ffn_embed_dim
    config.layer_norm_eps = 1e-5
    config.layerdrop = fs_config.encoder_layerdrop
    config.num_attention_heads = fs_config.encoder_attention_heads
    config.num_conv_pos_embedding_groups = fs_config.conv_pos_groups
    config.num_conv_pos_embeddings = fs_config.conv_pos
    config.num_feat_extract_layers = len(conv_layers)
    config.num_hidden_layers = fs_config.encoder_layers
    config.squeeze_factor = fs_config.squeeze_factor

    # take care of any params that are overridden by the Wav2VecCtc model
    if is_finetuned:
        fs_config = model.cfg
        config.final_dropout = fs_config.final_dropout
        config.layerdrop = fs_config.layerdrop
        config.activation_dropout = fs_config.activation_dropout
        config.apply_spec_augment = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0
        config.attention_dropout = fs_config.attention_dropout
        config.feat_proj_dropout = fs_config.dropout_input
        config.hidden_dropout = fs_config.dropout
        config.mask_feature_length = fs_config.mask_channel_length
        config.mask_feature_prob = fs_config.mask_channel_prob
        config.mask_time_length = fs_config.mask_length
        config.mask_time_prob = fs_config.mask_prob

    config.feature_extractor_type = "Wav2Vec2FeatureExtractor"
    config.tokenizer_class = "Wav2Vec2CTCTokenizer"

    return config
@torch.no_grad()
def convert_sew_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    """Copy/paste/tweak a fairseq SEW checkpoint into the transformers design."""
    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])

    if config_path is not None:
        config = SEWConfig.from_pretrained(config_path)
    else:
        config = convert_config(model[0], is_finetuned)
    model = model[0].eval()

    return_attention_mask = True if config.feat_extract_norm == "layer" else False
    feature_extractor = Wav2Vec2FeatureExtractor(
        feature_size=1,
        sampling_rate=16000,
        padding_value=0,
        do_normalize=True,
        return_attention_mask=return_attention_mask,
    )

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            target_dict.indices[target_dict.bos_word] = target_dict.pad_index
            target_dict.indices[target_dict.pad_word] = target_dict.bos_index
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(target_dict.indices, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_model = SEWForCTC(config)
    else:
        hf_model = SEWModel(config)
        feature_extractor.save_pretrained(pytorch_dump_folder_path)

    recursively_load_weights(model, hf_model, is_finetuned)

    hf_model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--is_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
    )
    args = parser.parse_args()
    convert_sew_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned
    )
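# --- Illustrative invocation (added by the editor; not part of the original file) ---
# Script name and paths are placeholders; a pretrained-only (not fine-tuned)
# checkpoint skips --dict_path and --is_finetuned:
#
#   python convert_sew_checkpoint.py \
#       --checkpoint_path ./sew-mid-100k.pt \
#       --pytorch_dump_folder_path ./sew-mid-100k-hf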
import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
GLUE_TASKS_NUM_LABELS = {
'cola': 2,
'mnli': 3,
'mrpc': 2,
'sst-2': 2,
'sts-b': 1,
'qqp': 2,
'qnli': 2,
'rte': 2,
'wnli': 2,
}
logging.set_verbosity_info()
def convert_xlnet_checkpoint_to_pytorch(
    tf_checkpoint_path, xlnet_config_file, pytorch_dump_folder_path, finetuning_task=None
):
    # Initialise PyTorch model
    config = XLNetConfig.from_json_file(xlnet_config_file)

    finetuning_task = finetuning_task.lower() if finetuning_task is not None else ""
    if finetuning_task in GLUE_TASKS_NUM_LABELS:
        print(f"Building PyTorch XLNetForSequenceClassification model from configuration: {config}")
        config.finetuning_task = finetuning_task
        config.num_labels = GLUE_TASKS_NUM_LABELS[finetuning_task]
        model = XLNetForSequenceClassification(config)
    elif "squad" in finetuning_task:
        config.finetuning_task = finetuning_task
        model = XLNetForQuestionAnswering(config)
    else:
        model = XLNetLMHeadModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_xlnet(model, config, tf_checkpoint_path)

    # Save pytorch-model
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
    print(f"Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {os.path.abspath(pytorch_config_dump_path)}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--xlnet_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained XLNet model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        type=str,
        required=True,
        help="Path to the folder to store the PyTorch model or dataset/vocab.",
    )
    parser.add_argument(
        "--finetuning_task",
        default=None,
        type=str,
        help="Name of a task on which the XLNet TensorFlow model was fine-tuned",
    )
    args = parser.parse_args()
    print(args)

    convert_xlnet_checkpoint_to_pytorch(
        args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
    )
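# --- Illustrative invocation (added by the editor; not part of the original file) ---
# Script name and paths are placeholders; --finetuning_task selects the head
# (e.g. "sts-b" builds a single-label classifier per the table above):
#
#   python convert_xlnet_checkpoint.py \
#       --tf_checkpoint_path ./xlnet_cased_L-24_H-1024_A-16/xlnet_model.ckpt \
#       --xlnet_config_file ./xlnet_cased_L-24_H-1024_A-16/xlnet_config.json \
#       --pytorch_dump_folder_path ./xlnet-large-cased-pt \
#       --finetuning_task sts-b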
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

_import_structure = {
    "configuration_swinv2": ["SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Swinv2Config"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_swinv2"] = [
        "SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Swinv2ForImageClassification",
        "Swinv2ForMaskedImageModeling",
        "Swinv2Model",
        "Swinv2PreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_swinv2 import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, Swinv2Config

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_swinv2 import (
            SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Swinv2ForImageClassification,
            Swinv2ForMaskedImageModeling,
            Swinv2Model,
            Swinv2PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
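# --- Illustrative note (added by the editor; not part of the original file) ---
# With the _LazyModule pattern above, importing the package is cheap: the heavy
# torch-backed submodule is only imported on first attribute access, e.g.
#
#   from transformers.models.swinv2 import Swinv2Config  # loads configuration only
#   from transformers.models.swinv2 import Swinv2Model   # now triggers the modeling import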
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/vocab.json''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/vocab.json''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/vocab.json''',
'''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json''',
'''roberta-large-openai-detector''': (
'''https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json'''
),
},
'''merges_file''': {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/merges.txt''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/merges.txt''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/merges.txt''',
'''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt''',
'''roberta-large-openai-detector''': (
'''https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt'''
),
},
'''tokenizer_file''': {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/tokenizer.json''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/tokenizer.json''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json''',
'''roberta-base-openai-detector''': (
'''https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json'''
),
'''roberta-large-openai-detector''': (
'''https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''roberta-base''': 512,
'''roberta-large''': 512,
'''roberta-large-mnli''': 512,
'''distilroberta-base''': 512,
'''roberta-base-openai-detector''': 512,
'''roberta-large-openai-detector''': 512,
}
class RobertaTokenizerFast(PreTrainedTokenizerFast):
    """Construct a "fast" RoBERTa tokenizer, backed by HuggingFace's tokenizers library."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = RobertaTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False

            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True

            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # The mask token behaves like a normal word, i.e. it includes the space
        # before it, so we set lstrip to True.
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
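# --- Illustrative usage (added by the editor; not part of the original file) ---
# Downloading "roberta-base" is an assumption for demonstration:
#
#   tokenizer = RobertaTokenizerFast.from_pretrained("roberta-base")
#   enc = tokenizer("Hello world", "Second sentence")
#   # Pair encoding is <s> A </s> </s> B </s>, with all token_type_ids equal to 0,
#   # as produced by create_token_type_ids_from_sequences above.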
"""Convert an original CompVis latent-diffusion (unconditional) checkpoint into a diffusers LDMPipeline."""
import argparse

import torch
from omegaconf import OmegaConf

from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel


def convert_ldm_original(checkpoint_path, config_path, output_path):
    config = OmegaConf.load(config_path)
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    keys = list(state_dict.keys())

    # extract state_dict for VQVAE
    first_stage_dict = {}
    first_stage_key = "first_stage_model."
    for key in keys:
        if key.startswith(first_stage_key):
            first_stage_dict[key.replace(first_stage_key, "")] = state_dict[key]

    # extract state_dict for UNetLDM
    unet_state_dict = {}
    unet_key = "model.diffusion_model."
    for key in keys:
        if key.startswith(unet_key):
            unet_state_dict[key.replace(unet_key, "")] = state_dict[key]

    vqvae_init_args = config.model.params.first_stage_config.params
    unet_init_args = config.model.params.unet_config.params

    vqvae = VQModel(**vqvae_init_args).eval()
    vqvae.load_state_dict(first_stage_dict)

    unet = UNetLDMModel(**unet_init_args).eval()
    unet.load_state_dict(unet_state_dict)

    noise_scheduler = DDIMScheduler(
        timesteps=config.model.params.timesteps,
        beta_schedule="scaled_linear",
        beta_start=config.model.params.linear_start,
        beta_end=config.model.params.linear_end,
        clip_sample=False,
    )

    pipeline = LDMPipeline(vqvae, unet, noise_scheduler)
    pipeline.save_pretrained(output_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--checkpoint_path", type=str, required=True)
    parser.add_argument("--config_path", type=str, required=True)
    parser.add_argument("--output_path", type=str, required=True)
    args = parser.parse_args()

    convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
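# --- Illustrative invocation (added by the editor; not part of the original file) ---
# Paths are placeholders for a CompVis-style latent-diffusion checkpoint:
#
#   python convert_ldm_original.py \
#       --checkpoint_path ./ldm/model.ckpt \
#       --config_path ./ldm/config.yaml \
#       --output_path ./ldm-pipeline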
"""Remove duplicate initializers from an ONNX model to reduce its file size."""
import os

import numpy
import onnx


def _is_equal_tensor_proto(a, b):
    # Compare two TensorProtos while ignoring their names.
    name_a = a.name
    name_b = b.name

    a.name = ""
    b.name = ""

    res = a == b

    a.name = name_a
    b.name = name_b

    return res


def _node_replace_input_with(node_proto, name, new_name):
    for i, input_name in enumerate(node_proto.input):
        if input_name == name:
            node_proto.input.insert(i, new_name)
            node_proto.input.pop(i + 1)

    if node_proto.op_type == "If":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
        _graph_replace_input_with(node_proto.attribute[1].g, name, new_name)
    if node_proto.op_type == "Loop":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)


def _graph_replace_input_with(graph_proto, name, new_name):
    for n in graph_proto.node:
        _node_replace_input_with(n, name, new_name)


def _remove_dup_initializers_from_model(model, model_without_ext, ind_to_replace):
    inits_with_data = list(model.graph.initializer)
    inits = list(model_without_ext.graph.initializer)
    for i, ref_i in ind_to_replace:
        assert inits_with_data[i].name == inits[i].name
        assert inits_with_data[ref_i].name == inits[ref_i].name
        assert i > ref_i

        name_i = inits[i].name
        name_ref = inits[ref_i].name

        model_without_ext.graph.initializer.remove(inits[i])

        # for n in model.graph.node:
        _graph_replace_input_with(model_without_ext.graph, name_i, name_ref)


def remove_dup_initializers(onnx_file_path):
    """Find duplicated initializers, rewrite references to them, and save an optimized copy."""
    model_file_folder = os.path.dirname(onnx_file_path)
    model_file_name = os.path.basename(onnx_file_path)

    model = onnx.load(os.path.join(model_file_folder, model_file_name))

    inits = list(model.graph.initializer)

    dup_set = set()
    dup_map = {}
    ind_to_replace = []

    total_reduced_size = 0

    for i in range(len(inits)):
        if i in dup_set:
            continue
        for j in range(i + 1, len(inits)):
            if j in dup_set:
                continue
            if _is_equal_tensor_proto(inits[i], inits[j]):
                dup_set.add(i)
                dup_set.add(j)

                dtype = inits[j].data_type
                mem_size = numpy.prod(inits[j].dims)
                if dtype == 1:
                    mem_size *= 4
                elif dtype == 6:
                    mem_size *= 4
                elif dtype == 7 or dtype == 11:
                    mem_size *= 8
                else:
                    print("unexpected data type: ", dtype)
                total_reduced_size += mem_size

                name_i = inits[i].name
                name_j = inits[j].name
                if name_i in dup_map:
                    dup_map[name_i].append(name_j)
                else:
                    dup_map[name_i] = [name_j]
                ind_to_replace.append((j, i))

    print("total reduced size: ", total_reduced_size / 1024 / 1024 / 1024, "GB")

    ind_to_replace = sorted(ind_to_replace)
    _remove_dup_initializers_from_model(model, model, ind_to_replace)

    optimized_model_file_name = "optimized_" + model_file_name
    new_model = os.path.join(model_file_folder, optimized_model_file_name)
    onnx.save(model, new_model)

    return new_model
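# --- Illustrative usage (added by the editor; not part of the original file) ---
# The path is a placeholder; the call rewrites references to duplicated
# initializers and writes "optimized_<name>" next to the input file:
#
#   optimized_path = remove_dup_initializers("./model.onnx")
#   print(optimized_path)  # ./optimized_model.onnx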